Commit cae5c1ab authored by Alex Hung, committed by Alex Deucher

drm/amd/display: remove redundant CONFIG_DRM_AMD_DC_DCN in amdgpu_dm

[Why & How]
CONFIG_DRM_AMD_DC_DCN is used to bypass compilation failures, but DC
code should be OS-agnostic.

This patch fixes it by removing the unnecessary CONFIG_DRM_AMD_DC_DCN
guards in the amdgpu_dm directory.
Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
Acked-by: Stylon Wang <stylon.wang@amd.com>
Signed-off-by: Alex Hung <alex.hung@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 663e4811
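For reference, a minimal, hypothetical before/after sketch of the guard-removal pattern applied throughout this diff; `dcn_only_setup()` is an illustrative stand-in for the DCN-specific calls touched by the patch, not a function in the driver.

```c
/* Before: DCN-specific code is only compiled when the Kconfig switch is set. */
#if defined(CONFIG_DRM_AMD_DC_DCN)
	dcn_only_setup(adev);	/* hypothetical DCN-specific call */
#endif

/* After: the guard is dropped and the call is compiled unconditionally,
 * matching the expectation that DC code is OS-agnostic and builds in
 * amdgpu_dm without per-config carve-outs.
 */
	dcn_only_setup(adev);	/* hypothetical DCN-specific call */
```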
@@ -83,7 +83,6 @@
 #include <drm/drm_vblank.h>
 #include <drm/drm_audio_component.h>
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
 #include "dcn/dcn_1_0_offset.h"
@@ -92,7 +91,6 @@
 #include "vega10_ip_offset.h"
 #include "soc15_common.h"
-#endif
 #include "modules/inc/mod_freesync.h"
 #include "modules/power/power_helpers.h"
@@ -603,7 +601,6 @@ static void dm_crtc_high_irq(void *interrupt_params)
 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 }
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 /**
  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
@@ -827,7 +824,6 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 	if (count > DMUB_TRACE_MAX_READ)
 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
 }
-#endif /* CONFIG_DRM_AMD_DC_DCN */
 static int dm_set_clockgating_state(void *handle,
 		  enum amd_clockgating_state state)
@@ -1125,9 +1121,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 	switch (adev->ip_versions[DCE_HWIP][0]) {
 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
 		hw_params.dpia_supported = true;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
-#endif
 		break;
 	default:
 		break;
@@ -1189,7 +1183,6 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
 	}
 }
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
 {
 	uint64_t pt_base;
@@ -1244,8 +1237,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 	pa_config->is_hvm_enabled = 0;
 }
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static void vblank_control_worker(struct work_struct *work)
 {
 	struct vblank_control_work *vblank_work =
@@ -1282,8 +1274,6 @@ static void vblank_control_worker(struct work_struct *work)
 	kfree(vblank_work);
 }
-#endif
 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 {
 	struct hpd_rx_irq_offload_work *offload_work;
@@ -1410,9 +1400,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	mutex_init(&adev->dm.dc_lock);
 	mutex_init(&adev->dm.audio_lock);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	spin_lock_init(&adev->dm.vblank_lock);
-#endif
 	if(amdgpu_dm_irq_init(adev)) {
 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
@@ -1505,12 +1493,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
 		init_data.flags.edp_no_power_sequencing = true;
-#ifdef CONFIG_DRM_AMD_DC_DCN
 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
-#endif
 	init_data.flags.seamless_boot_edp_requested = false;
@@ -1566,7 +1552,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		goto error;
 	}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
 		struct dc_phy_addr_space_config pa_config;
@@ -1575,7 +1560,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		// Call the DC init_memory func
 		dc_setup_system_context(adev->dm.dc, &pa_config);
 	}
-#endif
 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
 	if (!adev->dm.freesync_module) {
@@ -1587,14 +1571,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	amdgpu_dm_init_color_mod();
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	if (adev->dm.dc->caps.max_links > 0) {
 		adev->dm.vblank_control_workqueue =
 			create_singlethread_workqueue("dm_vblank_control_workqueue");
 		if (!adev->dm.vblank_control_workqueue)
 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
 	}
-#endif
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
@@ -1626,7 +1608,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		}
 		amdgpu_dm_outbox_init(adev);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 			dmub_aux_setconfig_callback, false)) {
 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
@@ -1640,7 +1621,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
 			goto error;
 		}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
 	}
 	if (amdgpu_dm_initialize_drm_device(adev)) {
@@ -1687,12 +1667,10 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 {
 	int i;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	if (adev->dm.vblank_control_workqueue) {
 		destroy_workqueue(adev->dm.vblank_control_workqueue);
 		adev->dm.vblank_control_workqueue = NULL;
 	}
-#endif
 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
@@ -2403,9 +2381,7 @@ static int dm_suspend(void *handle)
 	if (amdgpu_in_reset(adev)) {
 		mutex_lock(&dm->dc_lock);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 		dc_allow_idle_optimizations(adev->dm.dc, false);
-#endif
 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
@@ -3558,7 +3534,6 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
 	return 0;
 }
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 /* Register IRQ sources and initialize IRQ callbacks */
 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 {
@@ -3747,7 +3722,6 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev)
 	return 0;
 }
-#endif
 /*
  * Acquires the lock for the atomic state object and returns
@@ -4251,7 +4225,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		goto fail;
 	}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	/* Use Outbox interrupt */
 	switch (adev->ip_versions[DCE_HWIP][0]) {
 	case IP_VERSION(3, 0, 0):
@@ -4284,7 +4257,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 			break;
 		}
 	}
-#endif
 	/* Disable vblank IRQs aggressively for power-saving. */
 	adev_to_drm(adev)->vblank_disable_immediate = true;
@@ -4380,7 +4352,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		}
 		break;
 	default:
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 		switch (adev->ip_versions[DCE_HWIP][0]) {
 		case IP_VERSION(1, 0, 0):
 		case IP_VERSION(1, 0, 1):
@@ -4406,7 +4377,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 				  adev->ip_versions[DCE_HWIP][0]);
 			goto fail;
 		}
-#endif
 		break;
 	}
@@ -4555,7 +4525,7 @@ static int dm_early_init(void *handle)
 		adev->mode_info.num_dig = 6;
 		break;
 	default:
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 		switch (adev->ip_versions[DCE_HWIP][0]) {
 		case IP_VERSION(2, 0, 2):
 		case IP_VERSION(3, 0, 0):
@@ -4592,7 +4562,6 @@ static int dm_early_init(void *handle)
 				  adev->ip_versions[DCE_HWIP][0]);
 			return -EINVAL;
 		}
-#endif
 		break;
 	}
@@ -6646,10 +6615,8 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	struct amdgpu_display_manager *dm = &adev->dm;
 	struct vblank_control_work *work;
-#endif
 	int rc = 0;
 	if (enable) {
@@ -6672,7 +6639,6 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 	if (amdgpu_in_reset(adev))
 		return 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	if (dm->vblank_control_workqueue) {
 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
 		if (!work)
@@ -6690,7 +6656,6 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 		queue_work(dm->vblank_control_workqueue, &work->work);
 	}
-#endif
 	return 0;
 }
@@ -9365,14 +9330,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	/* Update the planes if changed or disable if we don't have any. */
 	if ((planes_count || acrtc_state->active_planes == 0) &&
 			acrtc_state->stream) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 		/*
 		 * If PSR or idle optimizations are enabled then flush out
 		 * any pending work before hardware programming.
 		 */
 		if (dm->vblank_control_workqueue)
 			flush_workqueue(dm->vblank_control_workqueue);
-#endif
 		bundle->stream_update.stream = acrtc_state->stream;
 		if (new_pcrtc_state->mode_changed) {
@@ -9705,21 +9668,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	if (dc_state) {
 		/* if there mode set or reset, disable eDP PSR */
 		if (mode_set_reset_required) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 			if (dm->vblank_control_workqueue)
 				flush_workqueue(dm->vblank_control_workqueue);
-#endif
 			amdgpu_dm_psr_disable_all(dm);
 		}
 		dm_enable_per_frame_crtc_master_sync(dc_state);
 		mutex_lock(&dm->dc_lock);
 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 		/* Allow idle optimization when vblank count is 0 for display off */
 		if (dm->active_vblank_irq_count == 0)
 			dc_allow_idle_optimizations(dm->dc,true);
-#endif
 		mutex_unlock(&dm->dc_lock);
 	}
......
@@ -358,14 +358,12 @@ struct amdgpu_display_manager {
 	 */
 	struct mutex audio_lock;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	/**
 	 * @vblank_lock:
 	 *
 	 * Guards access to deferred vblank work state.
 	 */
 	spinlock_t vblank_lock;
-#endif
 	/**
 	 * @audio_component:
@@ -469,14 +467,12 @@ struct amdgpu_display_manager {
 	struct hdcp_workqueue *hdcp_workqueue;
 #endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	/**
 	 * @vblank_control_workqueue:
 	 *
 	 * Deferred work for vblank control events.
	 */
 	struct workqueue_struct *vblank_control_workqueue;
-#endif
 	struct drm_atomic_state *cached_state;
 	struct dc_state *cached_dc_state;
@@ -493,14 +489,12 @@ struct amdgpu_display_manager {
 	 */
 	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	/**
 	 * @active_vblank_irq_count:
 	 *
 	 * number of currently active vblank irqs
 	 */
 	uint32_t active_vblank_irq_count;
-#endif
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 	/**
......
@@ -291,9 +291,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
 	case LINK_RATE_RBR2:
 	case LINK_RATE_HIGH2:
 	case LINK_RATE_HIGH3:
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	case LINK_RATE_UHBR10:
-#endif
 		break;
 	default:
 		valid_input = false;
@@ -3411,7 +3409,6 @@ static int disable_hpd_get(void *data, u64 *val)
 DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get,
 			 disable_hpd_set, "%llu\n");
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 /*
  * Temporary w/a to force sst sequence in M42D DP2 mst receiver
  * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst
@@ -3459,7 +3456,6 @@ static int dp_ignore_cable_id_get(void *data, u64 *val)
 }
 DEFINE_DEBUGFS_ATTRIBUTE(dp_ignore_cable_id_ops, dp_ignore_cable_id_get,
 			 dp_ignore_cable_id_set, "%llu\n");
-#endif
 /*
  * Sets the DC visual confirm debug option from the given string.
@@ -3608,12 +3604,10 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
 			    adev, &mst_topo_fops);
 	debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev,
 			    &dtn_log_fops);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev,
 			    &dp_set_mst_en_for_sst_ops);
 	debugfs_create_file("amdgpu_dm_dp_ignore_cable_id", 0644, root, adev,
 			    &dp_ignore_cable_id_ops);
-#endif
 	debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
 				   &visual_confirm_fops);
......
@@ -977,9 +977,7 @@ void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
 	// TODO
 }
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
 {
 	/* TODO: add periodic detection implementation */
 }
-#endif
@@ -45,12 +45,10 @@
 #include "amdgpu_dm_debugfs.h"
 #endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "dc/dcn20/dcn20_resource.h"
 bool is_timing_changed(struct dc_stream_state *cur_stream,
 		       struct dc_stream_state *new_stream);
-#endif
 static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 				  struct drm_dp_aux_msg *msg)
......
@@ -46,8 +46,6 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 struct dsc_mst_fairness_vars {
 	int pbn;
 	bool dsc_enabled;
@@ -64,6 +62,5 @@ bool needs_dsc_aux_workaround(struct dc_link *link);
 void pre_validate_dsc(struct drm_atomic_state *state,
 		      struct dm_atomic_state **dm_state_ptr,
 		      struct dsc_mst_fairness_vars *vars);
-#endif
 #endif
@@ -29,7 +29,6 @@
 #include "amdgpu_dm.h"
 #include "modules/power/power_helpers.h"
-#ifdef CONFIG_DRM_AMD_DC_DCN
 static bool link_supports_psrsu(struct dc_link *link)
 {
 	struct dc *dc = link->ctx->dc;
@@ -53,7 +52,6 @@ static bool link_supports_psrsu(struct dc_link *link)
 	return true;
 }
-#endif
 /*
  * amdgpu_dm_set_psr_caps() - set link psr capabilities
@@ -73,11 +71,9 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link)
 		link->psr_settings.psr_feature_enabled = false;
 	} else {
-#ifdef CONFIG_DRM_AMD_DC_DCN
 		if (link_supports_psrsu(link))
 			link->psr_settings.psr_version = DC_PSR_VERSION_SU_1;
 		else
-#endif
 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
 		link->psr_settings.psr_feature_enabled = true;
......