Commit b8985785 authored by Dave Airlie

Merge branch 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux into drm-next

Just some misc bug fixes for 4.9.

* 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: revert "use more than 64KB fragment size if possible"
  drm/amdgpu: warn if dp aux is still attached on free
  drm/amdgpu/dce11: add missing drm_mode_config_cleanup call
  drm/amdgpu: also track late init state
  drm/amdgpu/virtual_dce: adjust config ifdef
  drm/amdgpu/vce: add support for hw config packet (v2)
  drm/amdgpu: clean up to set fw_offset as 0 twice
  drm/amdgpu: remove DRM_AMD_POWERPLAY
  drm/radeon: Prevent races on pre DCE4 between flip submission and completion.
  drm/radeon: Slightly more robust flip completion handling for < DCE-4
parents 9c704d14 8036617e
@@ -32,5 +32,4 @@ config DRM_AMDGPU_GART_DEBUGFS
 	  Selecting this option creates a debugfs file to inspect the mapped
 	  pages. Uses more memory for housekeeping, enable only for debugging.
-source "drivers/gpu/drm/amd/powerplay/Kconfig"
 source "drivers/gpu/drm/amd/acp/Kconfig"
@@ -111,14 +111,10 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
 amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
 amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
-ifneq ($(CONFIG_DRM_AMD_POWERPLAY),)
 include $(FULL_AMD_PATH)/powerplay/Makefile
 amdgpu-y += $(AMD_POWERPLAY_FILES)
-endif
 obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
 CFLAGS_amdgpu_trace_points.o := -I$(src)
@@ -1943,6 +1943,7 @@ struct amdgpu_ip_block_status {
 	bool valid;
 	bool sw;
 	bool hw;
+	bool late_initialized;
 	bool hang;
 };
@@ -769,8 +769,10 @@ static void amdgpu_connector_destroy(struct drm_connector *connector)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-	if (amdgpu_connector->ddc_bus->has_aux)
+	if (amdgpu_connector->ddc_bus->has_aux) {
 		drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
+		amdgpu_connector->ddc_bus->has_aux = false;
+	}
 	amdgpu_connector_free_edid(connector);
 	kfree(amdgpu_connector->con_priv);
 	drm_connector_unregister(connector);
@@ -1424,6 +1424,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
 				DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
 				return r;
 			}
+			adev->ip_block_status[i].late_initialized = true;
 		}
 	}

@@ -1469,8 +1470,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 	}
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+		if (!adev->ip_block_status[i].late_initialized)
+			continue;
 		if (adev->ip_blocks[i].funcs->late_fini)
 			adev->ip_blocks[i].funcs->late_fini((void *)adev);
+		adev->ip_block_status[i].late_initialized = false;
 	}
 	return 0;
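The late_initialized flag above pairs each successful late_init with at most one late_fini during teardown. A minimal standalone sketch of that bookkeeping pattern, using made-up block/ops names rather than the real amdgpu structures, might look like this:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for per-IP-block callbacks and status. */
struct ip_block_ops {
	int  (*late_init)(void *handle);
	void (*late_fini)(void *handle);
};

struct ip_block_status {
	bool late_initialized;
};

static int demo_late_init(void *handle)
{
	(void)handle;
	puts("late_init");
	return 0;
}

static void demo_late_fini(void *handle)
{
	(void)handle;
	puts("late_fini");
}

int main(void)
{
	struct ip_block_ops ops[2] = {
		{ demo_late_init, demo_late_fini },
		{ NULL, NULL },			/* block with no late hooks */
	};
	struct ip_block_status status[2] = { { false }, { false } };
	int i;

	/* Init path: a block is marked only after its late_init succeeded. */
	for (i = 0; i < 2; i++) {
		if (ops[i].late_init) {
			if (ops[i].late_init(NULL))
				return 1;	/* abort; nothing extra is marked */
			status[i].late_initialized = true;
		}
	}

	/* Teardown path (reverse order): skip anything never late-initialized. */
	for (i = 1; i >= 0; i--) {
		if (!status[i].late_initialized)
			continue;
		if (ops[i].late_fini)
			ops[i].late_fini(NULL);
		status[i].late_initialized = false;
	}
	return 0;
}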
@@ -174,7 +174,6 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
-#ifdef CONFIG_DRM_AMD_POWERPLAY
 MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
 module_param_named(powerplay, amdgpu_powerplay, int, 0444);

@@ -183,7 +182,6 @@ module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
 MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
 module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
-#endif
 MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
 module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
@@ -220,6 +220,7 @@ void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
 {
 	if (!i2c)
 		return;
+	WARN_ON(i2c->has_aux);
 	i2c_del_adapter(&i2c->adapter);
 	kfree(i2c);
 }
@@ -42,7 +42,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
 	amd_pp = &(adev->powerplay);
 	if (adev->pp_enabled) {
-#ifdef CONFIG_DRM_AMD_POWERPLAY
 		struct amd_pp_init *pp_init;
 		pp_init = kzalloc(sizeof(struct amd_pp_init), GFP_KERNEL);

@@ -55,7 +54,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
 		pp_init->device = amdgpu_cgs_create_device(adev);
 		ret = amd_powerplay_init(pp_init, amd_pp);
 		kfree(pp_init);
-#endif
 	} else {
 		amd_pp->pp_handle = (void *)adev;

@@ -97,7 +95,6 @@ static int amdgpu_pp_early_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret = 0;
-#ifdef CONFIG_DRM_AMD_POWERPLAY
 	switch (adev->asic_type) {
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:

@@ -120,9 +117,6 @@ static int amdgpu_pp_early_init(void *handle)
 		adev->pp_enabled = false;
 		break;
 	}
-#else
-	adev->pp_enabled = false;
-#endif
 	ret = amdgpu_powerplay_init(adev);
 	if (ret)

@@ -144,12 +138,11 @@ static int amdgpu_pp_late_init(void *handle)
 	ret = adev->powerplay.ip_funcs->late_init(
 			adev->powerplay.pp_handle);
-#ifdef CONFIG_DRM_AMD_POWERPLAY
 	if (adev->pp_enabled && adev->pm.dpm_enabled) {
 		amdgpu_pm_sysfs_init(adev);
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
 	}
-#endif
 	return ret;
 }

@@ -162,10 +155,8 @@ static int amdgpu_pp_sw_init(void *handle)
 	ret = adev->powerplay.ip_funcs->sw_init(
 			adev->powerplay.pp_handle);
-#ifdef CONFIG_DRM_AMD_POWERPLAY
 	if (adev->pp_enabled)
 		adev->pm.dpm_enabled = true;
-#endif
 	return ret;
 }

@@ -216,7 +207,6 @@ static int amdgpu_pp_hw_fini(void *handle)
 static void amdgpu_pp_late_fini(void *handle)
 {
-#ifdef CONFIG_DRM_AMD_POWERPLAY
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	if (adev->pp_enabled) {

@@ -227,7 +217,6 @@ static void amdgpu_pp_late_fini(void *handle)
 	if (adev->powerplay.ip_funcs->late_fini)
 		adev->powerplay.ip_funcs->late_fini(
 				adev->powerplay.pp_handle);
-#endif
 }

 static int amdgpu_pp_suspend(void *handle)
@@ -273,7 +273,6 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 	amdgpu_bo_unreserve(*bo);
-	fw_offset = 0;
 	for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) {
 		ucode = &adev->firmware.ucode[i];
 		if (ucode->fw) {
@@ -699,6 +699,20 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 		case 0x05000009: /* clock table */
 			break;
+		case 0x0500000c: /* hw config */
+			switch (p->adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+			case CHIP_KAVERI:
+			case CHIP_MULLINS:
+#endif
+			case CHIP_CARRIZO:
+				break;
+			default:
+				r = -EINVAL;
+				goto out;
+			}
+			break;
 		case 0x03000001: /* encode */
 			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
 						*size, 0);
@@ -878,13 +878,13 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
 	 * allocation size to the fragment size.
 	 */
-	const uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
+	/* SI and newer are optimized for 64KB */
+	uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
+	uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
 	uint64_t frag_start = ALIGN(start, frag_align);
 	uint64_t frag_end = end & ~(frag_align - 1);
-	uint32_t frag;
 	/* system pages are non continuously */
 	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
 	    (frag_start >= frag_end)) {

@@ -893,10 +893,6 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
 		return;
 	}
-	/* use more than 64KB fragment size if possible */
-	frag = lower_32_bits(frag_start | frag_end);
-	frag = likely(frag) ? __ffs(frag) : 31;
 	/* handle the 4K area at the beginning */
 	if (start != frag_start) {
 		amdgpu_vm_update_ptes(params, vm, start, frag_start,

@@ -906,7 +902,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
 	/* handle the area in the middle */
 	amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
-			      flags | AMDGPU_PTE_FRAG(frag));
+			      flags | frag_flags);
 	/* handle the 4K area at the end */
 	if (frag_end != end) {
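For context on the revert: the removed heuristic derived the fragment exponent from the alignment shared by frag_start and frag_end (the lowest set bit of their OR), while the restored code always encodes the fixed AMDGPU_LOG2_PAGES_PER_FRAG, which with 4KB pages corresponds to the 64KB fragments mentioned in the new comment. A standalone sketch of that arithmetic, using __builtin_ctzll as a stand-in for the kernel's __ffs and made-up page numbers:

#include <stdint.h>
#include <stdio.h>

#define LOG2_PAGES_PER_FRAG 4	/* 16 x 4KB pages = 64KB, as in the fixed path */

/* Largest power-of-two page alignment shared by both ends of the range,
 * i.e. what the reverted heuristic used as the fragment exponent. */
static unsigned int dynamic_frag(uint64_t frag_start, uint64_t frag_end)
{
	uint64_t bits = frag_start | frag_end;

	return bits ? (unsigned int)__builtin_ctzll(bits) : 31;
}

int main(void)
{
	/* Hypothetical page-granular range: start page 0x400, end page 0x800. */
	uint64_t frag_start = 0x400, frag_end = 0x800;

	printf("fixed fragment:   2^%d pages\n", LOG2_PAGES_PER_FRAG);
	printf("dynamic fragment: 2^%u pages\n",
	       dynamic_frag(frag_start, frag_end));
	return 0;
}

For this example range the dynamic path would pick 2^10 pages (a 4MB fragment), which is exactly the "more than 64KB if possible" behavior being reverted.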
@@ -3159,6 +3159,7 @@ static int dce_v11_0_sw_fini(void *handle)
 	dce_v11_0_afmt_fini(adev);
+	drm_mode_config_cleanup(adev->ddev);
 	adev->mode_info.mode_config_initialized = false;
 	return 0;
@@ -99,15 +99,15 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
 				       struct amdgpu_mode_mc_save *save)
 {
 	switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
-#ifdef CONFIG_DRM_AMDGPU_CIK
 		dce_v8_0_disable_dce(adev);
-#endif
 		break;
+#endif
 	case CHIP_FIJI:
 	case CHIP_TONGA:
 		dce_v10_0_disable_dce(adev);
-config DRM_AMD_POWERPLAY
-	bool  "Enable AMD powerplay component"
-	depends on DRM_AMDGPU
-	default n
-	help
-	  select this option will enable AMD powerplay component.
@@ -1638,8 +1638,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
-	/* set pageflip to happen anywhere in vblank interval */
-	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+	/* set pageflip to happen only at start of vblank interval (front porch) */
+	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
@@ -321,16 +321,30 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
 	update_pending = radeon_page_flip_pending(rdev, crtc_id);
 	/* Has the pageflip already completed in crtc, or is it certain
-	 * to complete in this vblank?
+	 * to complete in this vblank? GET_DISTANCE_TO_VBLANKSTART provides
+	 * distance to start of "fudged earlier" vblank in vpos, distance to
+	 * start of real vblank in hpos. vpos >= 0 && hpos < 0 means we are in
+	 * the last few scanlines before start of real vblank, where the vblank
+	 * irq can fire, so we have sampled update_pending a bit too early and
+	 * know the flip will complete at leading edge of the upcoming real
+	 * vblank. On pre-AVIVO hardware, flips also complete inside the real
+	 * vblank, not only at leading edge, so if update_pending for hpos >= 0
+	 * == inside real vblank, the flip will complete almost immediately.
+	 * Note that this method of completion handling is still not 100% race
+	 * free, as we could execute before the radeon_flip_work_func managed
+	 * to run and set the RADEON_FLIP_SUBMITTED status, thereby we no-op,
+	 * but the flip still gets programmed into hw and completed during
+	 * vblank, leading to a delayed emission of the flip completion event.
+	 * This applies at least to pre-AVIVO hardware, where flips are always
+	 * completing inside vblank, not only at leading edge of vblank.
	 */
 	if (update_pending &&
-	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev,
-							       crtc_id,
-							       USE_REAL_VBLANKSTART,
-							       &vpos, &hpos, NULL, NULL,
-							       &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
-	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
-	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
+	    (DRM_SCANOUTPOS_VALID &
+	     radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+					GET_DISTANCE_TO_VBLANKSTART,
+					&vpos, &hpos, NULL, NULL,
+					&rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
+	    ((vpos >= 0 && hpos < 0) || (hpos >= 0 && !ASIC_IS_AVIVO(rdev)))) {
 		/* crtc didn't flip in this target vblank interval,
 		 * but flip is pending in crtc. Based on the current
 		 * scanout position we know that the current frame is

@@ -438,16 +452,19 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	}
 	/* Wait until we're out of the vertical blank period before the one
-	 * targeted by the flip
+	 * targeted by the flip. Always wait on pre DCE4 to avoid races with
+	 * flip completion handling from vblank irq, as these old asics don't
+	 * have reliable pageflip completion interrupts.
	 */
 	while (radeon_crtc->enabled &&
 	       (radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0,
					   &vpos, &hpos, NULL, NULL,
					   &crtc->hwmode)
		& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	       (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
-	       (int)(work->target_vblank -
-		     dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)
+	       (!ASIC_IS_AVIVO(rdev) ||
+		((int) (work->target_vblank -
+			dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)))
 		usleep_range(1000, 2000);
 	/* We borrow the event spin lock for protecting flip_status */
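The reworked completion test above reduces to a small predicate over the scanout position query. A standalone sketch of just that decision, with a hypothetical helper name and vpos/hpos carrying the distances described in the added comment:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical condensation of the test: vpos is the distance to the "fudged
 * earlier" vblank start, hpos the distance to the real vblank start. Returns
 * true when a pending flip should be treated as completing in this vblank. */
static bool flip_completes_this_vblank(bool scanout_valid, int vpos, int hpos,
				       bool is_avivo)
{
	if (!scanout_valid)
		return false;
	/* In the few scanlines just before real vblank: the irq fired early,
	 * the flip will complete at the leading edge of the coming vblank. */
	if (vpos >= 0 && hpos < 0)
		return true;
	/* Already inside real vblank: pre-AVIVO parts complete flips anywhere
	 * in vblank, so the pending flip completes almost immediately. */
	if (hpos >= 0 && !is_avivo)
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", flip_completes_this_vblank(true, 2, -3, true));  /* 1: just before real vblank */
	printf("%d\n", flip_completes_this_vblank(true, 5, 1, false));  /* 1: inside vblank, pre-AVIVO */
	printf("%d\n", flip_completes_this_vblank(true, 5, 1, true));   /* 0: AVIVO, leave flip pending */
	return 0;
}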
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
 	for (i = 0; i < rdev->num_crtc; i++) {
 		if (save->crtc_enabled[i]) {
 			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
-			if ((tmp & 0x7) != 0) {
+			if ((tmp & 0x7) != 3) {
 				tmp &= ~0x7;
+				tmp |= 0x3;
 				WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 			}
 			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);