Commit b9e56e41 authored by Dave Airlie's avatar Dave Airlie

Merge branch 'drm-next-4.15-dc' of git://people.freedesktop.org/~agd5f/linux into drm-next

Initial pull request for DC support.  We've completed a substantial amount of
the cleanup and restructuring in our TODO.  There are a few additional
cleanups that we are continuing to work on, but I don't think there are any
showstoppers remaining. We've tried to maintain most of the history for bisect
purposes.  Harry made sure all the commits build.  We've enabled DC for vega10
and Raven.  Pre-vega10 parts can be enabled via module parameter (amdgpu.dc=1),
but are not enabled by default at this point until we get further testing
upstream.
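
For reference, the gating behind amdgpu.dc is small; the following is a condensed sketch of the amdgpu_drv.c and amdgpu_device.c hunks below (the wrapper name here is illustrative, the real check lives in amdgpu_device_asic_has_dc_support()):

#include <linux/module.h>

int amdgpu_dc = -1;	/* amdgpu.dc: -1 = auto (default), 0 = disable, 1 = enable */

MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(dc, amdgpu_dc, int, 0444);

/* illustrative helper summarizing the pre-vega10 behaviour of the real check */
static bool pre_vega10_wants_dc(void)
{
#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
	return amdgpu_dc != 0;	/* DC on by default for older asics when PRE_VEGA is built */
#else
	return amdgpu_dc > 0;	/* otherwise only an explicit amdgpu.dc=1 turns it on */
#endif
}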

This code provides atomic modesetting support for DCE8 (CIK), DCE10 (Tonga,
Fiji), DCE11 (CZ, ST, Polaris), DCE12 (vega10), and DCN1 (RV) including
HDMI and DP audio, DP MST, and many other advanced display features.


Latest cleanups for DC from you and Harry.  Note that there is some
flickering on some older asics with this branch due to a regression in powerplay
that has already been fixed and will be included in my next non-DC pull request
next week.

* 'drm-next-4.15-dc' of git://people.freedesktop.org/~agd5f/linux: (897 commits)
  amdgpu/dc: use kref for dc_state.
  amdgpu/dc: convert dc_sink to kref.
  amdgpu/dc: convert dc_stream_state to kref.
  amdgpu/dc: use kref for dc_plane_state.
  amdgpu/dc: convert dc_gamma to kref reference counting.
  amdgpu/dc: convert dc_transfer to use a kref.
  amdgpu/dc: kill a bunch of dead code.
  amdgpu/dc: set a bunch of functions to static.
  amdgpu/dc: kill some deadcode in dc core.
  amdgpu/dc: fix indentation on a couple of returns.
  amdgpu/dm: don't use after free.
  amdgpu/dc: kfree already checks for NULL.
  amdgpu/dc: fix a bunch of misc whitespace.
  amdgpu/dc: drop hw_sequencer_types.h
  amdgpu/dc: drop dce110_types.h
  amdgpu/dc: use kernel ilog2 for log_2.
  amdgpu/dc: don't memset after kzalloc.
  amdgpu/dc: inline dal grph object id functions.
  amdgpu/dc: inline dml_round_to_multiple
  amdgpu/dc: rename bios get_image symbol to something more searchable.
  ...
parents bb7a9c8d 8ee5702a
...@@ -397,5 +397,15 @@ those drivers as simple as possible, so lots of room for refactoring:
Contact: Noralf Trønnes, Daniel Vetter
AMD DC Display Driver
---------------------
AMD DC is the display driver for AMD devices starting with Vega. There has been
a bunch of progress cleaning it up but there's still plenty of work to be done.
See drivers/gpu/drm/amd/display/TODO for tasks.
Contact: Harry Wentland, Alex Deucher
Outside DRM
===========
...@@ -41,3 +41,4 @@ config DRM_AMDGPU_GART_DEBUGFS
pages. Uses more memory for housekeeping, enable only for debugging.
source "drivers/gpu/drm/amd/acp/Kconfig"
source "drivers/gpu/drm/amd/display/Kconfig"
...@@ -3,13 +3,19 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
FULL_AMD_PATH=$(src)/..
DISPLAY_FOLDER_NAME=display
FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
-I$(FULL_AMD_PATH)/scheduler \
-I$(FULL_AMD_PATH)/powerplay/inc \
-I$(FULL_AMD_PATH)/acp/include -I$(FULL_AMD_PATH)/acp/include \
-I$(FULL_AMD_DISPLAY_PATH) \
-I$(FULL_AMD_DISPLAY_PATH)/include \
-I$(FULL_AMD_DISPLAY_PATH)/dc \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm
amdgpu-y := amdgpu_drv.o
...@@ -132,4 +138,13 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
ifneq ($(CONFIG_DRM_AMD_DC),)
RELATIVE_AMD_DISPLAY_PATH = ../$(DISPLAY_FOLDER_NAME)
include $(FULL_AMD_DISPLAY_PATH)/Makefile
amdgpu-y += $(AMD_DISPLAY_FILES)
endif
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
...@@ -66,6 +66,7 @@
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "amdgpu_dm.h"
#include "gpu_scheduler.h" #include "gpu_scheduler.h"
#include "amdgpu_virt.h" #include "amdgpu_virt.h"
...@@ -101,6 +102,8 @@ extern int amdgpu_vm_fragment_size; ...@@ -101,6 +102,8 @@ extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop; extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug; extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode; extern int amdgpu_vm_update_mode;
extern int amdgpu_dc;
extern int amdgpu_dc_log;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
...@@ -1510,6 +1513,7 @@ struct amdgpu_device {
/* display */
bool enable_virtual_display;
struct amdgpu_mode_info mode_info;
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src pageflip_irq;
...@@ -1565,6 +1569,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
/* display related functionality */
struct amdgpu_display_manager dm;
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
struct mutex mn_lock;
...@@ -1626,6 +1633,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
/*
* Registers read & write functions.
*/
...@@ -1886,5 +1896,11 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo,
struct amdgpu_bo_va_mapping **mapping);
#if defined(CONFIG_DRM_AMD_DC)
int amdgpu_dm_display_resume(struct amdgpu_device *adev );
#else
static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
#endif
#include "amdgpu_object.h" #include "amdgpu_object.h"
#endif #endif
...@@ -911,10 +911,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device, ...@@ -911,10 +911,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
struct cgs_display_info *info) struct cgs_display_info *info)
{ {
CGS_FUNC_ADEV; CGS_FUNC_ADEV;
struct amdgpu_crtc *amdgpu_crtc;
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
uint32_t line_time_us, vblank_lines;
struct cgs_mode_info *mode_info;
if (info == NULL)
...@@ -928,6 +924,12 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
mode_info->ref_clock = adev->clock.spll.reference_freq;
}
if (!amdgpu_device_has_dc_support(adev)) {
struct amdgpu_crtc *amdgpu_crtc;
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
uint32_t line_time_us, vblank_lines;
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
...@@ -951,7 +953,14 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
}
}
}
} else {
info->display_count = adev->pm.pm_display_cfg.num_display;
if (mode_info != NULL) {
mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
mode_info->ref_clock = adev->clock.spll.reference_freq;
}
}
return 0;
}
...
...@@ -31,6 +31,7 @@
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
...@@ -1982,6 +1983,52 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
}
}
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
case CHIP_TONGA:
case CHIP_FIJI:
#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
return amdgpu_dc != 0;
#endif
case CHIP_KABINI:
case CHIP_MULLINS:
return amdgpu_dc > 0;
case CHIP_VEGA10:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
#endif
return amdgpu_dc != 0;
#endif
default:
return false;
}
}
/**
* amdgpu_device_has_dc_support - check if dc is supported
*
* @adev: amdgpu_device pointer
*
* Returns true for supported, false for not supported
*/
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev))
return false;
return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
/**
* amdgpu_device_init - initialize the driver
*
...@@ -2035,7 +2082,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
...@@ -2178,6 +2224,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
/* init i2c buses */
if (!amdgpu_device_has_dc_support(adev))
amdgpu_atombios_i2c_init(adev);
}
...@@ -2310,6 +2357,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->accel_working = false;
cancel_delayed_work_sync(&adev->late_init_work);
/* free i2c buses */
if (!amdgpu_device_has_dc_support(adev))
amdgpu_i2c_fini(adev);
amdgpu_atombios_fini(adev);
kfree(adev->bios);
...@@ -2361,12 +2409,14 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
drm_modeset_unlock_all(dev);
}
amdgpu_amdkfd_suspend(adev);
...@@ -2509,13 +2559,25 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
/* blat the mode back in */
if (fbcon) {
if (!amdgpu_device_has_dc_support(adev)) {
/* pre DCE11 */
drm_helper_resume_force_mode(dev);
/* turn on display hw */
drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
drm_modeset_unlock_all(dev);
} else {
/*
* There is no equivalent atomic helper to turn on
* display, so we defined our own function for this,
* once suspend resume is supported by the atomic
* framework this will be reworked
*/
amdgpu_dm_display_resume(adev);
}
}
drm_kms_helper_poll_enable(dev);
...@@ -2532,7 +2594,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
#ifdef CONFIG_PM
dev->dev->power.disable_depth++;
#endif
if (!amdgpu_device_has_dc_support(adev))
drm_helper_hpd_irq_event(dev);
else
drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif
...@@ -2829,6 +2894,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
*/
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
struct drm_atomic_state *state = NULL;
int i, r;
int resched;
bool need_full_reset, vram_lost = false;
...@@ -2842,6 +2908,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
/* store modesetting */
if (amdgpu_device_has_dc_support(adev))
state = drm_atomic_helper_suspend(adev->ddev);
/* block scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
...@@ -2959,6 +3028,10 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
}
}
if (amdgpu_device_has_dc_support(adev)) {
r = drm_atomic_helper_resume(adev->ddev, state);
amdgpu_dm_display_resume(adev);
} else
drm_helper_resume_force_mode(adev->ddev);
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
...
...@@ -518,7 +518,7 @@ amdgpu_framebuffer_init(struct drm_device *dev,
return 0;
}
static struct drm_framebuffer * struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
...@@ -556,7 +556,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return &amdgpu_fb->base;
}
static void amdgpu_output_poll_changed(struct drm_device *dev) void amdgpu_output_poll_changed(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
amdgpu_fb_output_poll_changed(adev);
...
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __AMDGPU_DISPLAY_H__
#define __AMDGPU_DISPLAY_H__
struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd);
void amdgpu_output_poll_changed(struct drm_device *dev);
#endif
...@@ -433,7 +433,7 @@ struct amdgpu_pm {
uint32_t fw_version;
uint32_t pcie_gen_mask;
uint32_t pcie_mlw_mask;
struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */ struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
};
#define R600_SSTU_DFLT 0
...
...@@ -104,6 +104,8 @@ int amdgpu_vm_debug = 0;
int amdgpu_vram_page_split = 512;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support = 0;
int amdgpu_dc = -1;
int amdgpu_dc_log = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_no_evict = 0;
...@@ -209,6 +211,12 @@ module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(dc, amdgpu_dc, int, 0444);
MODULE_PARM_DESC(dc, "Display Core Log Level (0 = minimal (default), 1 = chatty");
module_param_named(dc_log, amdgpu_dc_log, int, 0444);
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
...@@ -516,15 +524,15 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
/* Vega 10 */
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Raven */
{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
...
...@@ -42,11 +42,6 @@
this contains a helper + a amdgpu fb
the helper contains a pointer to amdgpu framebuffer baseclass.
*/
struct amdgpu_fbdev {
struct drm_fb_helper helper;
struct amdgpu_framebuffer rfb;
struct amdgpu_device *adev;
};
static int
amdgpufb_open(struct fb_info *info, int user)
...@@ -353,6 +348,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
/* disable all the possible outputs/crtcs before entering KMS mode */
if (!amdgpu_device_has_dc_support(adev))
drm_helper_disable_unused_functions(adev->ddev);
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
...
...@@ -37,6 +37,10 @@
#include <linux/pm_runtime.h>
#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
/*
...@@ -221,15 +225,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
spin_lock_init(&adev->irq.lock);
if (!adev->enable_virtual_display)
/* Disable vblank irqs aggressively for power-saving */
adev->ddev->vblank_disable_immediate = true;
r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
if (r) {
return r;
}
/* enable msi */
adev->irq.msi_enabled = false;
...@@ -241,7 +236,21 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
}
INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func); if (!amdgpu_device_has_dc_support(adev)) {
if (!adev->enable_virtual_display)
/* Disable vblank irqs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
adev->ddev->vblank_disable_immediate = true;
r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
if (r)
return r;
/* pre DCE11 */
INIT_WORK(&adev->hotplug_work,
amdgpu_hotplug_work_func);
}
INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
adev->irq.installed = true;
...
...@@ -1035,7 +1035,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
...
...@@ -38,11 +38,15 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/hrtimer.h>
#include "amdgpu_irq.h"
#include <drm/drm_dp_mst_helper.h>
#include "modules/inc/mod_freesync.h"
struct amdgpu_bo;
struct amdgpu_device;
struct amdgpu_encoder;
...@@ -53,9 +57,13 @@ struct amdgpu_hpd;
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
#define to_amdgpu_plane(x) container_of(x, struct amdgpu_plane, base)
#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base);
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
#define AMDGPU_MAX_PLANES 6
#define AMDGPU_MAX_AFMT_BLOCKS 9
enum amdgpu_rmx_type {
...@@ -292,6 +300,30 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
/* it is used to enter or exit into free sync mode */
int (*notify_freesync)(struct drm_device *dev, void *data,
struct drm_file *filp);
/* it is used to allow enablement of freesync mode */
int (*set_freesync_property)(struct drm_connector *connector,
struct drm_property *property,
uint64_t val);
};
struct amdgpu_framebuffer {
struct drm_framebuffer base;
struct drm_gem_object *obj;
/* caching for later use */
uint64_t address;
};
struct amdgpu_fbdev {
struct drm_fb_helper helper;
struct amdgpu_framebuffer rfb;
struct list_head fbdev_list;
struct amdgpu_device *adev;
};
struct amdgpu_mode_info {
...@@ -299,6 +331,7 @@ struct amdgpu_mode_info {
struct card_info *atom_card_info;
bool mode_config_initialized;
struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
struct amdgpu_plane *planes[AMDGPU_MAX_PLANES];
struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
...@@ -328,6 +361,7 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
const enum drm_plane_type *plane_type;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
...@@ -400,6 +434,16 @@ struct amdgpu_crtc {
/* for virtual dce */
struct hrtimer vblank_timer;
enum amdgpu_interrupt_state vsync_timer_enabled;
int otg_inst;
/* After Set Mode stream will be non-NULL */
const struct dc_stream *stream;
struct drm_pending_vblank_event *event;
};
struct amdgpu_plane {
struct drm_plane base;
enum drm_plane_type plane_type;
}; };
struct amdgpu_encoder_atom_dig {
...@@ -489,6 +533,19 @@ enum amdgpu_connector_dither {
AMDGPU_FMT_DITHER_ENABLE = 1,
};
struct amdgpu_dm_dp_aux {
struct drm_dp_aux aux;
struct ddc_service *ddc_service;
};
struct amdgpu_i2c_adapter {
struct i2c_adapter base;
struct ddc_service *ddc_service;
};
#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
struct amdgpu_connector {
struct drm_connector base;
uint32_t connector_id;
...@@ -500,6 +557,14 @@ struct amdgpu_connector {
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
/* number of modes generated from EDID at 'dc_sink' */
int num_modes;
/* The 'old' sink - before an HPD.
* The 'current' sink is in dc_link->sink. */
struct dc_sink *dc_sink;
struct dc_link *dc_link;
struct dc_sink *dc_em_sink;
const struct dc_stream *stream;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
...@@ -510,11 +575,39 @@ struct amdgpu_connector {
enum amdgpu_connector_audio audio;
enum amdgpu_connector_dither dither;
unsigned pixelclock_for_modeset;
struct drm_dp_mst_topology_mgr mst_mgr;
struct amdgpu_dm_dp_aux dm_dp_aux;
struct drm_dp_mst_port *port;
struct amdgpu_connector *mst_port;
struct amdgpu_encoder *mst_encoder;
struct semaphore mst_sem;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
/* Monitor range limits */
int min_vfreq ;
int max_vfreq ;
int pixel_clock_mhz;
/*freesync caps*/
struct mod_freesync_caps caps;
struct mutex hpd_lock;
}; };
struct amdgpu_framebuffer { /* TODO: start to use this struct and remove same field from base one */
struct drm_framebuffer base; struct amdgpu_mst_connector {
struct drm_gem_object *obj; struct amdgpu_connector base;
struct drm_dp_mst_topology_mgr mst_mgr;
struct amdgpu_dm_dp_aux dm_dp_aux;
struct drm_dp_mst_port *port;
struct amdgpu_connector *mst_port;
bool is_mst_connector;
struct amdgpu_encoder *mst_encoder;
};
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
...
...@@ -1466,7 +1466,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled) { if (amdgpu_crtc->enabled) {
adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
adev->pm.dpm.new_active_crtc_count++;
}
...
...@@ -65,6 +65,7 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "amdgpu_dm.h"
#include "amdgpu_amdkfd.h" #include "amdgpu_amdkfd.h"
#include "amdgpu_powerplay.h" #include "amdgpu_powerplay.h"
#include "dce_virtual.h" #include "dce_virtual.h"
...@@ -1900,6 +1901,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) ...@@ -1900,6 +1901,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display) if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
...@@ -1914,6 +1919,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
...@@ -1928,6 +1937,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
...@@ -1943,6 +1956,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
...
...@@ -535,6 +535,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
...@@ -548,6 +554,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
...
...@@ -77,6 +77,7 @@
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
/*
* Indirect registers accessor
...@@ -1502,6 +1503,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
...@@ -1518,6 +1523,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
...@@ -1536,6 +1545,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
...@@ -1550,6 +1563,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
...@@ -1567,6 +1584,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
...
menu "Display Engine Configuration"
depends on DRM && DRM_AMDGPU
config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
Raven ASICs.
config DRM_AMD_DC_PRE_VEGA
bool "DC support for Polaris and older ASICs"
default n
help
Choose this option to enable the new DC support for older asics
by default. This includes Polaris, Carrizo, Tonga, Bonaire,
and Hawaii.
config DRM_AMD_DC_DCN1_0
bool "DCN 1.0 Raven family"
depends on DRM_AMD_DC && X86
help
Choose this option if you want to have display engine support
for the Raven (RV) family.
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
help
Choose this option if you want to hit kgdb_break in assert.
endmenu
#
# Makefile for the DAL (Display Abstract Layer), which is a sub-component
# of the AMDGPU drm driver.
# It provides the HW control for display related functionalities.
AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
DAL_LIBS = amdgpu_dm dc modules/freesync
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
include $(AMD_DAL)
===============================================================================
TODOs
===============================================================================
1. Base this on drm-next - WIP
2. Cleanup commit history
3. WIP - Drop page flip helper and use DRM's version
4. DONE - Flatten all DC objects
* dc_stream/core_stream/stream should just be dc_stream
* Same for other DC objects
"Is there any major reason to keep all those abstractions?
Could you collapse everything into struct dc_stream?
I haven't looked recently but I didn't get the impression there was a
lot of design around what was public/protected, more whatever needed
to be used by someone else was in public."
~ Dave Airlie
5. DONE - Rename DC objects to align more with DRM
* dc_surface -> dc_plane_state
* dc_stream -> dc_stream_state
6. DONE - Per-plane and per-stream validation
7. WIP - Per-plane and per-stream commit
8. WIP - Split pipe_ctx into plane and stream resource structs
9. Attach plane and stream resources to state object instead of validate_context
10. Remove dc_edid_caps and drm_helpers_parse_edid_caps
* Use drm_display_info instead
* Remove DC's edid quirks and rely on DRM's quirks (add quirks if needed)
"Making sure you use the sink-specific helper libraries and kernel
subsystems, since there's really no good reason to have 2nd
implementation of those in the kernel. Looks likes that's done for mst
and edid parsing. There's still a bit a midlayer feeling to the edid
parsing side (e.g. dc_edid_caps and dm_helpers_parse_edid_caps, I
think it'd be much better if you convert that over to reading stuff
from drm_display_info and if needed, push stuff into the core). Also,
I can't come up with a good reason why DC needs all this (except to
reimplement half of our edid quirk table, which really isn't a good
idea). Might be good if you put this onto the list of things to fix
long-term, but imo not a blocker. Definitely make sure new stuff
doesn't slip in (i.e. if you start adding edid quirks to DC instead of
the drm core, refactoring to use the core edid stuff was pointless)."
~ Daniel Vetter
11. Remove dc/i2caux. This folder can be somewhat misleading. It's basically an
overly complicated HW programming function for sending and receiving i2c/aux
commands. We can greatly simplify that and move it into dc/dceXYZ like other
HW blocks.
12. drm_modeset_lock in MST should no longer be needed in recent kernels
* Adopt appropriate locking scheme
13. get_modes and best_encoder callbacks look a bit funny. Can probably rip out
a few indirections, and consider removing entirely and using the
drm_atomic_helper_best_encoder default behaviour.
14. core/dc_debug.c, consider switching to the atomic state debug helpers and
moving all your driver state printing into the various atomic_print_state
callbacks. There's also plans to expose this stuff in a standard way across all
drivers, to make debugging userspace compositors easier across different hw.
15. Move DP/HDMI dual mode adaptors to drm_dp_dual_mode_helper.c. See
dal_ddc_service_i2c_query_dp_dual_mode_adaptor.
16. Move to core SCDC helpers (I think those are new since initial DC review).
17. There's still a pretty massive layer cake around dp aux and DPCD handling,
with like 3 levels of abstraction and using your own structures instead of the
stuff in drm_dp_helper.h. drm_dp_helper.h isn't really great and already has 2
incompatible styles, just means more reasons not to add a third (or well third
one gets to do the cleanup refactor).
18. There's a pile of sink handling code, both for DP and HDMI where I didn't
immediately recognize the standard. I think long term it'd be best for the drm
subsystem if we try to move as much of that into helpers/core as possible, and
share it with drivers. But that's a very long term goal, and by far not just an
issue with DC - other drivers, especially around DP sink handling, are equally
guilty.
19. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG
stuff just isn't up to the challenges either. We need to figure out something
that integrates better with DRM and linux debug printing, while not being
useless with filtering output. dynamic debug printing might be an option.
20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
retimer that we need to program to pass PHY compliance. Currently that's
bypassing the i2c device and goes directly to HW. This should be changed.
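
A minimal sketch of what item 20 suggests, going through the kernel i2c layer
instead of banging the HW directly (the helper name and single-register write
here are hypothetical, not the retimer's actual programming sequence):

#include <linux/errno.h>
#include <linux/i2c.h>

/* Hypothetical helper: write one retimer register via a struct i2c_adapter. */
static int dm_retimer_write(struct i2c_adapter *adap, u8 slave_addr,
			    u8 reg, u8 val)
{
	u8 buf[2] = { reg, val };
	struct i2c_msg msg = {
		.addr	= slave_addr,	/* 7-bit address of the retimer */
		.flags	= 0,		/* plain write */
		.len	= sizeof(buf),
		.buf	= buf,
	};

	/* i2c_transfer() returns the number of messages transferred on success */
	return i2c_transfer(adap, &msg, 1) == 1 ? 0 : -EIO;
}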
#
# Makefile for the 'dm' sub-component of DAL.
# It provides the control and status of dm blocks.
AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o
ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
endif
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
AMD_DISPLAY_FILES += $(AMDGPU_DM)
This diff is collapsed.
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __AMDGPU_DM_H__
#define __AMDGPU_DM_H__
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include "dc.h"
/*
* This file contains the definition for amdgpu_display_manager
* and its API for amdgpu driver's use.
* This component provides all the display related functionality
* and this is the only component that calls DAL API.
* The API contained here is intended for amdgpu driver use.
* The API that is called directly from KMS framework is located
* in amdgpu_dm_kms.h file
*/
#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/
#include "irq_types.h"
#include "signal_types.h"
/* Forward declarations */
struct amdgpu_device;
struct drm_device;
struct amdgpu_dm_irq_handler_data;
struct amdgpu_dm_prev_state {
struct drm_framebuffer *fb;
int32_t x;
int32_t y;
struct drm_display_mode mode;
};
struct common_irq_params {
struct amdgpu_device *adev;
enum dc_irq_source irq_src;
};
struct irq_list_head {
struct list_head head;
/* In case this interrupt needs post-processing, 'work' will be queued*/
struct work_struct work;
};
#ifdef ENABLE_FBC
struct dm_comressor_info {
void *cpu_addr;
struct amdgpu_bo *bo_ptr;
uint64_t gpu_addr;
};
#endif
struct amdgpu_display_manager {
struct dal *dal;
struct dc *dc;
struct cgs_device *cgs_device;
/* lock to be used when DAL is called from SYNC IRQ context */
spinlock_t dal_lock;
struct amdgpu_device *adev; /*AMD base driver*/
struct drm_device *ddev; /*DRM base driver*/
u16 display_indexes_num;
struct amdgpu_dm_prev_state prev_state;
/*
* 'irq_source_handler_table' holds a list of handlers
* per (DAL) IRQ source.
*
* Each IRQ source may need to be handled at different contexts.
* By 'context' we mean, for example:
* - The ISR context, which is the direct interrupt handler.
* - The 'deferred' context - this is the post-processing of the
* interrupt, but at a lower priority.
*
* Note that handlers are called in the same order as they were
* registered (FIFO).
*/
struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
struct common_irq_params
pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
struct common_irq_params
vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
/* this spin lock synchronizes access to 'irq_handler_list_table' */
spinlock_t irq_handler_list_table_lock;
/* Timer-related data. */
struct list_head timer_handler_list;
struct workqueue_struct *timer_workqueue;
/* Use dal_mutex for any activity which is NOT synchronized by
* DRM mode setting locks.
* For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
* DRM mode setting locks being acquired. This is where dal_mutex
* is acquired before calling into DAL. */
struct mutex dal_mutex;
struct backlight_device *backlight_dev;
const struct dc_link *backlight_link;
struct work_struct mst_hotplug_work;
struct mod_freesync *freesync_module;
/**
* Caches device atomic state for suspend/resume
*/
struct drm_atomic_state *cached_state;
#ifdef ENABLE_FBC
struct dm_comressor_info compressor;
#endif
};
struct amdgpu_dm_connector {
struct drm_connector base;
uint32_t connector_id;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
/* shared with amdgpu */
struct amdgpu_hpd hpd;
/* number of modes generated from EDID at 'dc_sink' */
int num_modes;
/* The 'old' sink - before an HPD.
* The 'current' sink is in dc_link->sink. */
struct dc_sink *dc_sink;
struct dc_link *dc_link;
struct dc_sink *dc_em_sink;
/* DM only */
struct drm_dp_mst_topology_mgr mst_mgr;
struct amdgpu_dm_dp_aux dm_dp_aux;
struct drm_dp_mst_port *port;
struct amdgpu_dm_connector *mst_port;
struct amdgpu_encoder *mst_encoder;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
/* Monitor range limits */
int min_vfreq ;
int max_vfreq ;
int pixel_clock_mhz;
/*freesync caps*/
struct mod_freesync_caps caps;
struct mutex hpd_lock;
bool fake_enable;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
/* basic init/fini API */
int amdgpu_dm_init(struct amdgpu_device *adev);
void amdgpu_dm_fini(struct amdgpu_device *adev);
void amdgpu_dm_destroy(void);
/* initializes drm_device display related structures, based on the information
* provided by DAL. The drm structures are: drm_crtc, drm_connector,
* drm_encoder, drm_mode_config
*
* Returns 0 on success
*/
int amdgpu_dm_initialize_drm_device(
struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
void amdgpu_dm_destroy_drm_device(
struct amdgpu_display_manager *dm);
/* Locking/Mutex */
bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm);
bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm);
/* Register "Backlight device" accessible by user-mode. */
void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm);
extern const struct amdgpu_ip_block_version dm_ip_block;
void amdgpu_dm_update_connector_after_detect(
struct amdgpu_dm_connector *aconnector);
struct amdgpu_dm_connector *amdgpu_dm_find_first_crct_matching_connector(
struct drm_atomic_state *state,
struct drm_crtc *crtc,
bool from_state_var);
struct amdgpu_framebuffer;
struct amdgpu_display_manager;
struct dc_validation_set;
struct dc_plane_state;
/* TODO rename to dc_stream_state */
struct dc_stream;
struct dm_plane_state {
struct drm_plane_state base;
struct dc_plane_state *dc_state;
};
struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
struct dm_atomic_state {
struct drm_atomic_state base;
struct dc_state *context;
};
#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
/*TODO Jodan Hersen use the one in amdgpu_dm*/
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct amdgpu_plane *aplane,
unsigned long possible_crtcs);
int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t link_index);
int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *amdgpu_dm_connector,
uint32_t link_index,
struct amdgpu_encoder *amdgpu_encoder);
int amdgpu_dm_encoder_init(
struct drm_device *dev,
struct amdgpu_encoder *aencoder,
uint32_t link_index);
void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc);
void amdgpu_dm_connector_destroy(struct drm_connector *connector);
void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder);
int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
int amdgpu_dm_atomic_commit(
struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock);
void amdgpu_dm_atomic_commit_tail(
struct drm_atomic_state *state);
int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
struct drm_connector *connector);
int amdgpu_dm_connector_atomic_set_property(
struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val);
int amdgpu_dm_connector_atomic_get_property(
struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val);
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
void amdgpu_dm_connector_init_helper(
struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
int connector_type,
struct dc_link *link,
int link_index);
int amdgpu_dm_connector_mode_valid(
struct drm_connector *connector,
struct drm_display_mode *mode);
void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector);
void amdgpu_dm_add_sink_to_freesync_module(
struct drm_connector *connector,
struct edid *edid);
void amdgpu_dm_remove_sink_from_freesync_module(
struct drm_connector *connector);
extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
#endif /* __AMDGPU_DM_H__ */
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __AMDGPU_DM_IRQ_H__
#define __AMDGPU_DM_IRQ_H__
#include "irq_types.h" /* DAL irq definitions */
/*
* Display Manager IRQ-related interfaces (for use by DAL).
*/
/**
* amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'.
*
* This function should be called exactly once - during DM initialization.
*
* Returns:
* 0 - success
* non-zero - error
*/
int amdgpu_dm_irq_init(
struct amdgpu_device *adev);
/**
* amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'.
*
* This function should be called exactly once - during DM destruction.
*
*/
void amdgpu_dm_irq_fini(
struct amdgpu_device *adev);
/**
* amdgpu_dm_irq_register_interrupt - register irq handler for Display block.
*
* @adev: AMD DRM device
* @int_params: parameters for the irq
 * @ih: pointer to the irq handler function
* @handler_args: arguments which will be passed to ih
*
* Returns:
* IRQ Handler Index on success.
* NULL on failure.
*
* Cannot be called from an interrupt handler.
*/
void *amdgpu_dm_irq_register_interrupt(
struct amdgpu_device *adev,
struct dc_interrupt_params *int_params,
void (*ih)(void *),
void *handler_args);
/**
* amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered
* by amdgpu_dm_irq_register_interrupt().
*
 * @adev: AMD DRM device.
 * @irq_source: interrupt source the handler was registered for.
 * @ih_index: irq handler index which was returned by
 * amdgpu_dm_irq_register_interrupt
*/
void amdgpu_dm_irq_unregister_interrupt(
struct amdgpu_device *adev,
enum dc_irq_source irq_source,
void *ih_index);
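/*
 * Illustrative sketch, not part of the driver: typical pairing of the two
 * calls above. The opaque handle returned by
 * amdgpu_dm_irq_register_interrupt() is passed back verbatim on unregister,
 * together with the irq source it was registered for. dm_example_high_irq(),
 * dm_example_register() and the explicit irq_source parameter are
 * hypothetical names used only for this sketch.
 */
#if 0
static void dm_example_high_irq(void *interrupt_params)
{
	/* runs in interrupt context - keep the work here short */
}

static void dm_example_register(struct amdgpu_device *adev,
	struct dc_interrupt_params *int_params,
	enum dc_irq_source irq_source)
{
	void *handler;

	handler = amdgpu_dm_irq_register_interrupt(adev, int_params,
		dm_example_high_irq, adev);
	if (!handler)
		return;

	/* ... later, on teardown ... */
	amdgpu_dm_irq_unregister_interrupt(adev, irq_source, handler);
}
#endif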
void amdgpu_dm_irq_register_timer(
struct amdgpu_device *adev,
struct dc_timer_interrupt_params *int_params,
interrupt_handler ih,
void *args);
/**
 * amdgpu_dm_irq_handler - generic IRQ handler.
 *
 * Calls all registered high-IRQ handlers immediately, and schedules
 * deferred work for the low-IRQ handlers.
 */
int amdgpu_dm_irq_handler(
struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
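/*
 * Illustrative sketch, not the driver's actual implementation: the high/low
 * split described above. High-priority handlers are called directly while
 * still in interrupt context; low-priority handlers are deferred to process
 * context via a work item. All names below are hypothetical.
 */
#if 0
struct dm_example_irq_work {
	struct work_struct work;	/* initialised elsewhere with INIT_WORK() */
	void (*handler)(void *);
	void *handler_arg;
};

static void dm_example_low_irq_work_func(struct work_struct *work)
{
	struct dm_example_irq_work *w =
		container_of(work, struct dm_example_irq_work, work);

	/* process context - the handler may sleep here */
	w->handler(w->handler_arg);
}

static void dm_example_dispatch(void (*high_ih)(void *), void *high_arg,
	struct dm_example_irq_work *low_work)
{
	/* high irq work: call immediately, still in interrupt context */
	if (high_ih)
		high_ih(high_arg);

	/* low irq work: defer to the system workqueue */
	schedule_work(&low_work->work);
}
#endif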
void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev);
void amdgpu_dm_hpd_init(struct amdgpu_device *adev);
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
/**
* amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
*
*/
int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
/**
* amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
 * amdgpu_dm_irq_resume_late - enable ASIC interrupts during resume.
*
*/
int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
#endif /* __AMDGPU_DM_IRQ_H__ */
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__
#define __DAL_AMDGPU_DM_MST_TYPES_H__
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
void amdgpu_dm_initialize_dp_connector(
struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
#endif
#
# Makefile for Display Core (dc) component.
#
DC_LIBS = basics bios calcs dce gpio i2caux irq virtual
ifdef CONFIG_DRM_AMD_DC_DCN1_0
DC_LIBS += dcn10 dml
endif
DC_LIBS += dce120
DC_LIBS += dce112
DC_LIBS += dce110
DC_LIBS += dce100
DC_LIBS += dce80
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o
AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
#
# Makefile for the 'basics' sub-component of DAL.
# It provides the general basic services required by other DAL
# subcomponents.
BASICS = conversion.o fixpt31_32.o fixpt32_32.o grph_object_id.o \
logger.o log_helpers.o vector.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
AMD_DISPLAY_FILES += $(AMD_DAL_BASICS)
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#define DIVIDER 10000
/* S2D13_MIN/S2D13_MAX define the clamp range [-3.0000, 3.0000] used below */
#define S2D13_MIN (-3 * DIVIDER)
#define S2D13_MAX (3 * DIVIDER)
uint16_t fixed_point_to_int_frac(
struct fixed31_32 arg,
uint8_t integer_bits,
uint8_t fractional_bits)
{
int32_t numerator;
int32_t divisor = 1 << fractional_bits;
uint16_t result;
uint16_t d = (uint16_t)dal_fixed31_32_floor(
dal_fixed31_32_abs(
arg));
if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
numerator = (uint16_t)dal_fixed31_32_floor(
dal_fixed31_32_mul_int(
arg,
divisor));
else {
numerator = dal_fixed31_32_floor(
dal_fixed31_32_sub(
dal_fixed31_32_from_int(
1LL << integer_bits),
dal_fixed31_32_recip(
dal_fixed31_32_from_int(
divisor))));
}
if (numerator >= 0)
result = (uint16_t)numerator;
else
result = (uint16_t)(
(1 << (integer_bits + fractional_bits + 1)) + numerator);
if ((result != 0) && dal_fixed31_32_lt(
arg, dal_fixed31_32_zero))
result |= 1 << (integer_bits + fractional_bits);
return result;
}
/**
 * convert_float_matrix - convert an array of fixed31_32 values into the
 * HW register format S2D13 (sign, 2 integer bits, 13 fractional bits).
 * @matrix: output buffer receiving the packed S2D13 register values
 * @flt: input fixed31_32 values
 * @buffer_size: number of elements to convert
 */
void convert_float_matrix(
uint16_t *matrix,
struct fixed31_32 *flt,
uint32_t buffer_size)
{
const struct fixed31_32 min_2_13 =
dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER);
const struct fixed31_32 max_2_13 =
dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER);
uint32_t i;
for (i = 0; i < buffer_size; ++i) {
uint32_t reg_value =
fixed_point_to_int_frac(
dal_fixed31_32_clamp(
flt[i],
min_2_13,
max_2_13),
2,
13);
matrix[i] = (uint16_t)reg_value;
}
}
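/*
 * Illustrative usage sketch, not part of the driver: packing a small colour
 * transform matrix row into S2D13 register values with the helpers above.
 * The matrix contents and the function name are arbitrary example choices;
 * the dal_fixed31_32_* constructors used are the existing fixed-point helpers.
 */
#if 0
static void example_pack_csc_row(uint16_t regs[4])
{
	struct fixed31_32 row[4];

	row[0] = dal_fixed31_32_from_int(1);		/*  1.0  */
	row[1] = dal_fixed31_32_from_fraction(1, 2);	/*  0.5  */
	row[2] = dal_fixed31_32_from_fraction(-1, 4);	/* -0.25 */
	row[3] = dal_fixed31_32_zero;			/*  0.0  */

	/* each entry is clamped to [S2D13_MIN, S2D13_MAX] and packed
	 * into a 16-bit sign + 2.13 fixed-point register value */
	convert_float_matrix(regs, row, 4);
}
#endif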
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_CONVERSION_H__
#define __DAL_CONVERSION_H__
#include "include/fixed31_32.h"
uint16_t fixed_point_to_int_frac(
struct fixed31_32 arg,
uint8_t integer_bits,
uint8_t fractional_bits);
void convert_float_matrix(
uint16_t *matrix,
struct fixed31_32 *flt,
uint32_t buffer_size);
static inline unsigned int log_2(unsigned int num)
{
return ilog2(num);
}
#endif
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/fixed32_32.h"
static uint64_t u64_div(uint64_t n, uint64_t d)
{
uint32_t i = 0;
uint64_t r;
uint64_t q = div64_u64_rem(n, d, &r);
for (i = 0; i < 32; ++i) {
uint64_t sbit = q & (1ULL<<63);
r <<= 1;
r |= sbit ? 1 : 0;
q <<= 1;
if (r >= d) {
r -= d;
q |= 1;
}
}
if (2*r >= d)
q += 1;
return q;
}
struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d)
{
struct fixed32_32 fx;
fx.value = u64_div((uint64_t)n << 32, (uint64_t)d << 32);
return fx;
}
struct fixed32_32 dal_fixed32_32_add(
struct fixed32_32 lhs,
struct fixed32_32 rhs)
{
struct fixed32_32 fx = {lhs.value + rhs.value};
ASSERT(fx.value >= rhs.value);
return fx;
}
struct fixed32_32 dal_fixed32_32_add_int(struct fixed32_32 lhs, uint32_t rhs)
{
struct fixed32_32 fx = {lhs.value + ((uint64_t)rhs << 32)};
ASSERT(fx.value >= (uint64_t)rhs << 32);
return fx;
}
struct fixed32_32 dal_fixed32_32_sub(
struct fixed32_32 lhs,
struct fixed32_32 rhs)
{
struct fixed32_32 fx;
ASSERT(lhs.value >= rhs.value);
fx.value = lhs.value - rhs.value;
return fx;
}
struct fixed32_32 dal_fixed32_32_sub_int(struct fixed32_32 lhs, uint32_t rhs)
{
struct fixed32_32 fx;
ASSERT(lhs.value >= ((uint64_t)rhs<<32));
fx.value = lhs.value - ((uint64_t)rhs<<32);
return fx;
}
struct fixed32_32 dal_fixed32_32_mul(
struct fixed32_32 lhs,
struct fixed32_32 rhs)
{
struct fixed32_32 fx;
uint64_t lhs_int = lhs.value>>32;
uint64_t lhs_frac = (uint32_t)lhs.value;
uint64_t rhs_int = rhs.value>>32;
uint64_t rhs_frac = (uint32_t)rhs.value;
uint64_t ahbh = lhs_int * rhs_int;
uint64_t ahbl = lhs_int * rhs_frac;
uint64_t albh = lhs_frac * rhs_int;
uint64_t albl = lhs_frac * rhs_frac;
ASSERT((ahbh>>32) == 0);
fx.value = (ahbh<<32) + ahbl + albh + (albl>>32);
return fx;
}
struct fixed32_32 dal_fixed32_32_mul_int(struct fixed32_32 lhs, uint32_t rhs)
{
struct fixed32_32 fx;
uint64_t lhsi = (lhs.value>>32) * (uint64_t)rhs;
uint64_t lhsf;
ASSERT((lhsi>>32) == 0);
lhsf = ((uint32_t)lhs.value) * (uint64_t)rhs;
ASSERT((lhsi<<32) + lhsf >= lhsf);
fx.value = (lhsi<<32) + lhsf;
return fx;
}
struct fixed32_32 dal_fixed32_32_div(
struct fixed32_32 lhs,
struct fixed32_32 rhs)
{
struct fixed32_32 fx;
fx.value = u64_div(lhs.value, rhs.value);
return fx;
}
struct fixed32_32 dal_fixed32_32_div_int(struct fixed32_32 lhs, uint32_t rhs)
{
struct fixed32_32 fx;
fx.value = u64_div(lhs.value, (uint64_t)rhs << 32);
return fx;
}
uint32_t dal_fixed32_32_ceil(struct fixed32_32 v)
{
ASSERT((uint32_t)v.value ? (v.value >> 32) + 1 >= 1 : true);
return (v.value>>32) + ((uint32_t)v.value ? 1 : 0);
}
uint32_t dal_fixed32_32_round(struct fixed32_32 v)
{
ASSERT(v.value + (1ULL<<31) >= (1ULL<<31));
return (v.value + (1ULL<<31))>>32;
}
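/*
 * Illustrative usage sketch, not part of the driver: the 32.32 helpers above
 * are typically used for clock/bandwidth style ratios that need a full 32
 * integer bits. The function name and values are arbitrary examples.
 */
#if 0
static uint32_t example_scaled_clock_khz(uint32_t clock_khz,
	uint32_t num, uint32_t den)
{
	/* ratio = num / den, kept in 32.32 fixed point */
	struct fixed32_32 ratio = dal_fixed32_32_from_fraction(num, den);

	/* scale the clock and round to the nearest integer kHz */
	return dal_fixed32_32_round(dal_fixed32_32_mul_int(ratio, clock_khz));
}
#endif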
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/grph_object_id.h"
static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
{
bool rc = true;
switch (id.type) {
case OBJECT_TYPE_UNKNOWN:
rc = false;
break;
case OBJECT_TYPE_GPU:
case OBJECT_TYPE_ENGINE:
/* do NOT check for id.id == 0 */
if (id.enum_id == ENUM_ID_UNKNOWN)
rc = false;
break;
default:
if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
rc = false;
break;
}
return rc;
}
bool dal_graphics_object_id_is_equal(
struct graphics_object_id id1,
struct graphics_object_id id2)
{
if (false == dal_graphics_object_id_is_valid(id1)) {
dm_output_to_console(
"%s: Warning: comparing invalid object 'id1'!\n", __func__);
return false;
}
if (false == dal_graphics_object_id_is_valid(id2)) {
dm_output_to_console(
"%s: Warning: comparing invalid object 'id2'!\n", __func__);
return false;
}
if (id1.id == id2.id && id1.enum_id == id2.enum_id
&& id1.type == id2.type)
return true;
return false;
}
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "logger.h"
#include "include/logger_interface.h"
#include "dm_helpers.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
struct dc_signal_type_info {
enum signal_type type;
char name[MAX_NAME_LEN];
};
static const struct dc_signal_type_info signal_type_info_tbl[] = {
{SIGNAL_TYPE_NONE, "NC"},
{SIGNAL_TYPE_DVI_SINGLE_LINK, "DVI"},
{SIGNAL_TYPE_DVI_DUAL_LINK, "DDVI"},
{SIGNAL_TYPE_HDMI_TYPE_A, "HDMIA"},
{SIGNAL_TYPE_LVDS, "LVDS"},
{SIGNAL_TYPE_RGB, "VGA"},
{SIGNAL_TYPE_DISPLAY_PORT, "DP"},
{SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
{SIGNAL_TYPE_EDP, "eDP"},
{SIGNAL_TYPE_VIRTUAL, "Virtual"}
};
void dc_conn_log(struct dc_context *ctx,
const struct dc_link *link,
uint8_t *hex_data,
int hex_data_count,
enum dc_log_type event,
const char *msg,
...)
{
int i;
va_list args;
struct log_entry entry = { 0 };
enum signal_type signal;
if (link->local_sink)
signal = link->local_sink->sink_signal;
else
signal = link->connector_signal;
if (link->type == dc_connection_mst_branch)
signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
dm_logger_open(ctx->logger, &entry, event);
for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
if (signal == signal_type_info_tbl[i].type)
break;
/* fall back to the "NC" entry when the signal type is not in the table,
 * so the lookup below cannot read past the end of the array */
if (i == NUM_ELEMENTS(signal_type_info_tbl))
i = 0;
dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
signal_type_info_tbl[i].name,
link->link_index);
va_start(args, msg);
entry.buf_offset += dm_log_to_buffer(
&entry.buf[entry.buf_offset],
LOG_MAX_LINE_SIZE - entry.buf_offset,
msg, args);
if (entry.buf[strlen(entry.buf) - 1] == '\n') {
entry.buf[strlen(entry.buf) - 1] = '\0';
entry.buf_offset--;
}
if (hex_data)
for (i = 0; i < hex_data_count; i++)
dm_logger_append(&entry, "%2.2X ", hex_data[i]);
dm_logger_append(&entry, "^\n");
dm_helpers_dc_conn_log(ctx, &entry, event);
dm_logger_close(&entry);
va_end(args);
}
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_LOGGER_H__
#define __DAL_LOGGER_H__
/* Structure for keeping track of offsets, buffer, etc */
#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
/* Connectivity log needs to output EDID, which needs at least 256x3 bytes,
 * so the log line size is increased to 896 to meet that requirement.
 */
#define LOG_MAX_LINE_SIZE 896
#include "include/logger_types.h"
struct dal_logger {
/* How far into the circular buffer has been read by dsat
* Read offset should never cross write offset. Write \0's to
* read data just to be sure?
*/
uint32_t buffer_read_offset;
/* How far into the circular buffer we have written
* Write offset should never cross read offset
*/
uint32_t buffer_write_offset;
uint32_t write_wrap_count;
uint32_t read_wrap_count;
uint32_t open_count;
char *log_buffer; /* Pointer to malloc'ed buffer */
uint32_t log_buffer_size; /* Size of circular buffer */
uint32_t mask; /*array of masks for major elements*/
union logger_flags flags;
struct dc_context *ctx;
};
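/*
 * Illustrative sketch, not part of the driver: how the circular-buffer
 * offsets above are typically advanced. dal_logger_example_advance_write()
 * is a hypothetical helper, not a function declared in this header.
 */
#if 0
static void dal_logger_example_advance_write(struct dal_logger *logger,
	uint32_t n)
{
	logger->buffer_write_offset += n;

	/* wrap back to the start of the buffer once the end is reached
	 * (n is assumed to be smaller than the buffer size) */
	if (logger->buffer_write_offset >= logger->log_buffer_size) {
		logger->buffer_write_offset -= logger->log_buffer_size;
		logger->write_wrap_count++;
	}
}
#endif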
#endif /* __DAL_LOGGER_H__ */
#
# Makefile for the 'bios' sub-component of DAL.
# It provides the parsing and executing controls for atom bios image.
BIOS = bios_parser.o bios_parser_interface.o bios_parser_helper.o command_table.o command_table_helper.o bios_parser_common.o
BIOS += command_table2.o command_table_helper2.o bios_parser2.o
AMD_DAL_BIOS = $(addprefix $(AMDDALPATH)/dc/bios/,$(BIOS))
AMD_DISPLAY_FILES += $(AMD_DAL_BIOS)
###############################################################################
# DCE 8x
###############################################################################
# All DCE8.x are derived from DCE8.0, so 8.0 MUST be defined if ANY of
# DCE8.x is compiled.
AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce80/command_table_helper_dce80.o
###############################################################################
# DCE 11x
###############################################################################
AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce110/command_table_helper_dce110.o
AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce112/command_table_helper_dce112.o
AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce112/command_table_helper2_dce112.o
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_BIOS_PARSER_H__
#define __DAL_BIOS_PARSER_H__
struct dc_bios *bios_parser_create(
struct bp_init_data *init,
enum dce_version dce_version);
#endif
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_BIOS_PARSER2_H__
#define __DAL_BIOS_PARSER2_H__
struct dc_bios *firmware_parser_create(
struct bp_init_data *init,
enum dce_version dce_version);
#endif
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __BIOS_PARSER_COMMON_H__
#define __BIOS_PARSER_COMMON_H__
#include "dm_services.h"
#include "ObjectID.h"
struct graphics_object_id object_id_from_bios_object_id(uint32_t bios_object_id);
#endif
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_BIOS_PARSER_HELPER_H__
#define __DAL_BIOS_PARSER_HELPER_H__
struct bios_parser;
uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset,
uint32_t size);
bool bios_is_accelerated_mode(struct dc_bios *bios);
void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
#endif