Commit 385c59c7 authored by Linus Torvalds

Merge tag 'drm-fixes-2019-01-11' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Not a huge amount for rc2, assume the usual quiet period, and rc3 will
  be most of it.

  amdgpu:
   - Powerplay fixes
   - Virtual display pinning fixes
   - Golden register updates for Vega
   - Pitch and GEM size validation fixes
   - SR-IOV init error fix
   - Page tables in system RAM disabled for some Raven systems
   - DP-MST resume fixes

  tc358767 bridge:
   - fix to work with a DisplayPort connector"

* tag 'drm-fixes-2019-01-11' of git://anongit.freedesktop.org/drm/drm: (26 commits)
  drm/amdgpu: disable system memory page tables for now
  drm/amdgpu: set WRITE_BURST_LENGTH to 64B to workaround SDMA1 hang
  drm/amdgpu: fix CPDMA hang in PRT mode for VEGA20
  drm/bridge: tc358767: use DP connector if no panel set
  drm/bridge: tc358767: fix output H/V syncs
  drm/bridge: tc358767: reject modes which require too much BW
  drm/bridge: tc358767: fix initial DP0/1_SRCCTRL value
  drm/bridge: tc358767: fix single lane configuration
  drm/bridge: tc358767: add defines for DP1_SRCCTRL & PHY_2LANE
  drm/bridge: tc358767: add bus flags
  drm/dp_mst: Add __must_check to drm_dp_mst_topology_mgr_resume()
  drm/amdgpu: Don't fail resume process if resuming atomic state fails
  drm/amdgpu: Don't ignore rc from drm_dp_mst_topology_mgr_resume()
  drm/amdgpu: validate user GEM object size
  drm/amdgpu: validate user pitch alignment
  drm/amd/powerplay: drop the unnecessary uclk hard min setting
  drm/amd/powerplay: avoid possible buffer overflow
  drm/amd/powerplay: create pp_od_clk_voltage device file under OD support
  drm/amd/powerplay: update OD support flag for SKU with no OD capabilities
  drm/amdgpu: make gfx9 enter into rlc safe mode when set MGCG
  ...
parents de6629eb f34c48e0
@@ -1701,8 +1701,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
if (amdgpu_sriov_vf(adev))
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
}
return 0;
}
@@ -2632,9 +2634,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
if (amdgpu_sriov_vf(adev))
amdgpu_virt_init_data_exchange(adev);
amdgpu_fbdev_init(adev);
r = amdgpu_pm_sysfs_init(adev);
@@ -2798,7 +2797,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
struct drm_framebuffer *fb = crtc->primary->fb;
struct amdgpu_bo *robj;
if (amdgpu_crtc->cursor_bo) {
if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
@@ -2906,7 +2905,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (amdgpu_crtc->cursor_bo) {
if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
@@ -3226,6 +3225,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
r = amdgpu_ib_ring_tests(adev);
error:
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
atomic_inc(&adev->vram_lost_counter);
......
@@ -188,10 +188,12 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
if (unlikely(r != 0)) {
DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
if (!adev->enable_virtual_display) {
r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
if (unlikely(r != 0)) {
DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
}
}
r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
@@ -211,7 +213,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
amdgpu_bo_unreserve(new_abo);
work->base = amdgpu_bo_gpu_offset(new_abo);
if (!adev->enable_virtual_display)
work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
@@ -242,9 +245,10 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
unpin:
if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
DRM_ERROR("failed to unpin new abo in error path\n");
}
if (!adev->enable_virtual_display)
if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
DRM_ERROR("failed to unpin new abo in error path\n");
unreserve:
amdgpu_bo_unreserve(new_abo);
@@ -527,6 +531,17 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
int ret;
int height;
struct amdgpu_device *adev = dev->dev_private;
int cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
int pitch = mode_cmd->pitches[0] / cpp;
pitch = amdgpu_align_pitch(adev, pitch, cpp, false);
if (mode_cmd->pitches[0] != pitch) {
DRM_DEBUG_KMS("Invalid pitch: expecting %d but got %d\n",
pitch, mode_cmd->pitches[0]);
return ERR_PTR(-EINVAL);
}
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
@@ -541,6 +556,13 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
height = ALIGN(mode_cmd->height, 8);
if (obj->size < pitch * height) {
DRM_DEBUG_KMS("Invalid GEM size: expecting >= %d but got %zu\n",
pitch * height, obj->size);
return ERR_PTR(-EINVAL);
}
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_put_unlocked(obj);
......
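For reference, the validation added in the hunks above comes down to two arithmetic checks: the user-supplied pitch must already equal the driver-aligned pitch for the format's bytes per pixel, and the GEM object must cover at least pitch times the height rounded up to 8 scanlines. Below is a minimal standalone sketch of that arithmetic; the 64-pixel alignment and the sizes in main() are assumptions for illustration only, since the real alignment comes from amdgpu_align_pitch() and varies per ASIC.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for amdgpu_align_pitch(): round a pitch given in
 * pixels up to a 64-pixel boundary and convert it to bytes. The real
 * alignment is per-ASIC and per-cpp; 64 is an assumption for this sketch. */
static uint32_t align_pitch_bytes(uint32_t pitch_px, uint32_t cpp)
{
    return ((pitch_px + 63) & ~63u) * cpp;
}

/* Mirrors the two checks added to amdgpu_display_user_framebuffer_create():
 * the user pitch must already be aligned, and the backing GEM object must
 * cover pitch * ALIGN(height, 8) bytes. Returns 0 on success, -1 otherwise. */
static int validate_fb(uint32_t height, uint32_t user_pitch,
                       uint64_t gem_size, uint32_t cpp)
{
    uint32_t pitch = align_pitch_bytes(user_pitch / cpp, cpp);
    uint64_t min_size;

    if (user_pitch != pitch) {
        fprintf(stderr, "invalid pitch: expected %u, got %u\n",
                pitch, user_pitch);
        return -1;
    }

    min_size = (uint64_t)pitch * ((height + 7) & ~7u);
    if (gem_size < min_size) {
        fprintf(stderr, "GEM too small: need >= %llu, got %llu\n",
                (unsigned long long)min_size,
                (unsigned long long)gem_size);
        return -1;
    }

    return 0;
}

int main(void)
{
    /* 1920x1080 XRGB8888: pitch 7680 bytes, object must be >= 8294400. */
    printf("valid framebuffer accepted: %d\n",
           validate_fb(1080, 7680, 8294400, 4) == 0);
    printf("undersized object rejected: %d\n",
           validate_fb(1080, 7680, 4096, 4) != 0);
    return 0;
}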
@@ -2008,6 +2008,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret;
if (adev->pm.sysfs_initialized)
@@ -2091,12 +2092,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
"pp_power_profile_mode\n");
return ret;
}
ret = device_create_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
if (ret) {
DRM_ERROR("failed to create device file "
"pp_od_clk_voltage\n");
return ret;
if (hwmgr->od_enabled) {
ret = device_create_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
if (ret) {
DRM_ERROR("failed to create device file "
"pp_od_clk_voltage\n");
return ret;
}
}
ret = device_create_file(adev->dev,
&dev_attr_gpu_busy_percent);
@@ -2118,6 +2121,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
if (adev->pm.dpm_enabled == 0)
return;
@@ -2138,8 +2143,9 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
device_remove_file(adev->dev,
&dev_attr_pp_power_profile_mode);
device_remove_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
if (hwmgr->od_enabled)
device_remove_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
}
......
@@ -847,9 +847,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bp->size = amdgpu_vm_bo_size(adev, level);
bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
adev->flags & AMD_IS_APU)
bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
......
@@ -167,19 +167,6 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
struct amdgpu_bo *abo;
abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
else {
amdgpu_bo_unpin(abo);
amdgpu_bo_unreserve(abo);
}
}
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
@@ -692,7 +679,9 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
amdgpu_bo_unref(&works->old_abo);
kfree(works->shared);
kfree(works);
return 0;
}
......
@@ -4233,7 +4233,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
u32 tmp;
u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr;
int r;
/* Set the write pointer delay */
WREG32(mmCP_RB_WPTR_DELAY, 0);
@@ -4278,9 +4277,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
amdgpu_ring_clear_ring(ring);
gfx_v8_0_cp_gfx_start(adev);
ring->sched.ready = true;
r = amdgpu_ring_test_helper(ring);
return r;
return 0;
}
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4369,10 +4367,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
r = amdgpu_ring_test_helper(kiq_ring);
if (r)
DRM_ERROR("KCQ enable failed\n");
return r;
amdgpu_ring_commit(kiq_ring);
return 0;
}
static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
@@ -4709,16 +4706,32 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
if (r)
goto done;
/* Test KCQs - reversing the order of rings seems to fix ring test failure
* after GPU reset
*/
for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
done:
return r;
}
static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
{
int r, i;
struct amdgpu_ring *ring;
/* collect all the ring_tests here, gfx, kiq, compute */
ring = &adev->gfx.gfx_ring[0];
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
ring = &adev->gfx.kiq.ring;
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
r = amdgpu_ring_test_helper(ring);
amdgpu_ring_test_helper(ring);
}
done:
return r;
return 0;
}
static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
@@ -4739,6 +4752,11 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
r = gfx_v8_0_kcq_resume(adev);
if (r)
return r;
r = gfx_v8_0_cp_test_all_rings(adev);
if (r)
return r;
gfx_v8_0_enable_gui_idle_interrupt(adev, true);
return 0;
@@ -5086,6 +5104,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
gfx_v8_0_cp_gfx_resume(adev);
gfx_v8_0_cp_test_all_rings(adev);
adev->gfx.rlc.funcs->start(adev);
return 0;
......
@@ -113,7 +113,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -135,10 +138,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -3587,6 +3587,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t data, def;
amdgpu_gfx_rlc_enter_safe_mode(adev);
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -3651,6 +3653,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
......
@@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
return r;
}
/* Retrieve checksum from mailbox2 */
if (req == IDH_REQ_GPU_INIT_ACCESS) {
if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
adev->virt.fw_reserve.checksum_key =
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
......
@@ -78,7 +78,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -96,6 +95,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
};
@@ -103,6 +103,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
};
......
@@ -699,22 +699,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
bool need_hotplug = false;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type == dc_connection_mst_branch &&
!aconnector->mst_port) {
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_port)
continue;
if (suspend)
drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
else
drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
}
mgr = &aconnector->mst_mgr;
if (suspend) {
drm_dp_mst_topology_mgr_suspend(mgr);
} else {
ret = drm_dp_mst_topology_mgr_resume(mgr);
if (ret < 0) {
drm_dp_mst_topology_mgr_set_mst(mgr, false);
need_hotplug = true;
}
}
}
drm_modeset_unlock(&dev->mode_config.connection_mutex);
if (need_hotplug)
drm_kms_helper_hotplug_event(dev);
}
/**
@@ -898,7 +912,6 @@ static int dm_resume(void *handle)
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
enum dc_connection_type new_connection_type = dc_connection_none;
int ret;
int i;
/* power on hardware */
@@ -971,13 +984,13 @@ static int dm_resume(void *handle)
}
}
ret = drm_atomic_helper_resume(ddev, dm->cached_state);
drm_atomic_helper_resume(ddev, dm->cached_state);
dm->cached_state = NULL;
amdgpu_dm_irq_resume_late(adev);
return ret;
return 0;
}
/**
......
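The s3_handle_mst()/dm_resume() hunks above stop ignoring the return code of drm_dp_mst_topology_mgr_resume(): when a topology fails to resume, MST is disabled on that connector and a single hotplug event is sent after the loop so userspace re-probes. Below is a minimal sketch of that control flow with the DRM helpers replaced by stubs; the stub names, types and return values are assumptions for illustration only.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the DRM MST helpers used in s3_handle_mst(); the real
 * functions operate on struct drm_dp_mst_topology_mgr. */
struct mst_mgr { const char *name; bool resume_ok; };

static int mst_topology_mgr_resume(struct mst_mgr *mgr)
{
    return mgr->resume_ok ? 0 : -1;   /* real helper returns < 0 on failure */
}

static void mst_topology_mgr_set_mst(struct mst_mgr *mgr, bool enable)
{
    printf("%s: set_mst(%d)\n", mgr->name, enable);
}

static void kms_hotplug_event(void)
{
    printf("hotplug event -> userspace re-probes connectors\n");
}

/* Mirrors the resume half of s3_handle_mst(): try to resume every MST
 * manager; on failure disable MST on that connector and remember to send
 * a single hotplug event once the loop is done. */
static void resume_mst(struct mst_mgr *mgrs, int count)
{
    bool need_hotplug = false;
    int i;

    for (i = 0; i < count; i++) {
        if (mst_topology_mgr_resume(&mgrs[i]) < 0) {
            mst_topology_mgr_set_mst(&mgrs[i], false);
            need_hotplug = true;
        }
    }

    if (need_hotplug)
        kms_hotplug_event();
}

int main(void)
{
    struct mst_mgr mgrs[] = {
        { "DP-1", true },
        { "DP-2", false },   /* topology changed while suspended */
    };

    resume_mst(mgrs, 2);
    return 0;
}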
@@ -127,12 +127,13 @@ enum amd_pp_task {
};
enum PP_SMC_POWER_PROFILE {
PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0,
PP_SMC_POWER_PROFILE_POWERSAVING = 0x1,
PP_SMC_POWER_PROFILE_VIDEO = 0x2,
PP_SMC_POWER_PROFILE_VR = 0x3,
PP_SMC_POWER_PROFILE_COMPUTE = 0x4,
PP_SMC_POWER_PROFILE_CUSTOM = 0x5,
PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0,
PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1,
PP_SMC_POWER_PROFILE_POWERSAVING = 0x2,
PP_SMC_POWER_PROFILE_VIDEO = 0x3,
PP_SMC_POWER_PROFILE_VR = 0x4,
PP_SMC_POWER_PROFILE_COMPUTE = 0x5,
PP_SMC_POWER_PROFILE_CUSTOM = 0x6,
};
enum {
......
@@ -64,17 +64,19 @@ static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
{
hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2;
hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0;
hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1;
hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3;
hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4;
hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING;
hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO;
hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR;
hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
}
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
......
@@ -77,8 +77,9 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
static const struct profile_mode_setting smu7_profiling[6] =
{{1, 0, 100, 30, 1, 0, 100, 10},
static const struct profile_mode_setting smu7_profiling[7] =
{{0, 0, 0, 0, 0, 0, 0, 0},
{1, 0, 100, 30, 1, 0, 100, 10},
{1, 10, 0, 30, 0, 0, 0, 0},
{0, 0, 0, 0, 1, 10, 16, 31},
{1, 0, 11, 50, 1, 0, 100, 10},
@@ -4889,7 +4890,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
uint32_t i, size = 0;
uint32_t len;
static const char *profile_name[6] = {"3D_FULL_SCREEN",
static const char *profile_name[7] = {"BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
"VR",
......
@@ -804,9 +804,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->backend = data;
hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
vega10_set_default_registry_data(hwmgr);
data->disable_dpm_mask = 0xff;
@@ -4668,13 +4668,15 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t i, size = 0;
static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,},
static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,},
{70, 60, 1, 3,},
{90, 60, 0, 0,},
{70, 60, 0, 0,},
{70, 90, 0, 0,},
{30, 60, 0, 6,},
};
static const char *profile_name[6] = {"3D_FULL_SCREEN",
static const char *profile_name[7] = {"BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
"VR",
......
@@ -390,9 +390,9 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->backend = data;
hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
vega20_set_default_registry_data(hwmgr);
@@ -980,6 +980,9 @@ static int vega20_od8_set_feature_capabilities(
pp_table->FanZeroRpmEnable)
od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;
if (!od_settings->overdrive8_capabilities)
hwmgr->od_enabled = false;
return 0;
}
@@ -1689,13 +1692,6 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
"Failed to set soft min memclk !",
return ret);
min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetHardMinByFreq,
(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
"Failed to set hard min memclk !",
return ret);
}
if (data->smu_features[GNLD_DPM_UVD].enabled &&
@@ -2248,6 +2244,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
if (soft_max_level >= data->dpm_table.gfx_table.count) {
pr_err("Clock level specified %d is over max allowed %d\n",
soft_max_level,
data->dpm_table.gfx_table.count - 1);
return -EINVAL;
}
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
data->dpm_table.gfx_table.dpm_state.soft_max_level =
@@ -2268,6 +2271,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
if (soft_max_level >= data->dpm_table.mem_table.count) {
pr_err("Clock level specified %d is over max allowed %d\n",
soft_max_level,
data->dpm_table.mem_table.count - 1);
return -EINVAL;
}
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
data->dpm_table.mem_table.dpm_state.soft_max_level =
@@ -3261,6 +3271,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
int pplib_workload = 0;
switch (power_profile) {
case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
pplib_workload = WORKLOAD_DEFAULT_BIT;
break;
case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
break;
@@ -3290,6 +3303,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
uint32_t i, size = 0;
uint16_t workload_type = 0;
static const char *profile_name[] = {
"BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
......
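The vega20_force_clock_level() hunks above add a bounds check before the DPM tables are indexed: the highest bit set in the user mask must stay below the table size, otherwise -EINVAL is returned. Below is a small sketch of that mask handling; fls() is open-coded here and the table size in main() is an arbitrary example, not a value from the commit.

#include <stdio.h>
#include <strings.h>   /* ffs() */

/* Highest set bit, 1-based, mirroring the kernel's fls(). */
static int fls_u32(unsigned int x)
{
    int bit = 0;

    while (x) {
        bit++;
        x >>= 1;
    }
    return bit;
}

/* Mirrors the added check in vega20_force_clock_level(): derive the soft
 * min/max DPM levels from the user mask and reject a max level beyond the
 * table. Returns -1 in place of -EINVAL. */
static int force_clock_level(unsigned int mask, int table_count)
{
    int soft_min = mask ? ffs(mask) - 1 : 0;
    int soft_max = mask ? fls_u32(mask) - 1 : 0;

    if (soft_max >= table_count) {
        fprintf(stderr, "Clock level specified %d is over max allowed %d\n",
                soft_max, table_count - 1);
        return -1;
    }

    printf("soft min level %d, soft max level %d\n", soft_min, soft_max);
    return 0;
}

int main(void)
{
    force_clock_level(0x0f, 8);    /* levels 0..3 with 8 entries: accepted */
    force_clock_level(0x100, 8);   /* level 8 with 8 entries: rejected */
    return 0;
}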
@@ -705,7 +705,7 @@ enum PP_TABLE_VERSION {
/**
* The main hardware manager structure.
*/
#define Workload_Policy_Max 5
#define Workload_Policy_Max 6
struct pp_hwmgr {
void *adev;
......
@@ -98,6 +98,8 @@
#define DP0_STARTVAL 0x064c
#define DP0_ACTIVEVAL 0x0650
#define DP0_SYNCVAL 0x0654
#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
#define DP0_MISC 0x0658
#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
#define BPC_6 (0 << 5)
@@ -142,6 +144,8 @@
#define DP0_LTLOOPCTRL 0x06d8
#define DP0_SNKLTCTRL 0x06e4
#define DP1_SRCCTRL 0x07a0
/* PHY */
#define DP_PHY_CTRL 0x0800
#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
@@ -150,6 +154,7 @@
#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
@@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
unsigned long rate;
u32 value;
int ret;
u32 dp_phy_ctrl;
rate = clk_get_rate(tc->refclk);
switch (rate) {
@@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
tc_write(SYS_PLLPARAM, value);
tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
if (tc->link.base.num_lanes == 2)
dp_phy_ctrl |= PHY_2LANE;
tc_write(DP_PHY_CTRL, dp_phy_ctrl);
/*
* Initially PLLs are in bypass. Force PLL parameter update,
@@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
@@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc)
if (!tc->mode)
return -EINVAL;
/* from excel file - DP0_SrcCtrl */
tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
/* from excel file - DP1_SrcCtrl */
tc_write(0x07a0, 0x00003083);
tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
tc_write(DP1_SRCCTRL,
(tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
rate = clk_get_rate(tc->refclk);
switch (rate) {
@@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc)
}
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
tc_write(SYS_PLLPARAM, value);
/* Setup Main Link */
dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
if (tc->link.base.num_lanes == 2)
dp_phy_ctrl |= PHY_2LANE;
tc_write(DP_PHY_CTRL, dp_phy_ctrl);
msleep(100);
@@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct tc_data *tc = connector_to_tc(connector);
u32 req, avail;
u32 bits_per_pixel = 24;
/* DPI interface clock limitation: upto 154 MHz */
if (mode->clock > 154000)
return MODE_CLOCK_HIGH;
req = mode->clock * bits_per_pixel / 8;
avail = tc->link.base.num_lanes * tc->link.base.rate;
if (req > avail)
return MODE_BAD;
return MODE_OK;
}
@@ -1186,7 +1209,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
/* Create eDP connector */
drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
DRM_MODE_CONNECTOR_eDP);
tc->panel ? DRM_MODE_CONNECTOR_eDP :
DRM_MODE_CONNECTOR_DisplayPort);
if (ret)
return ret;
@@ -1195,6 +1219,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
drm_display_info_set_bus_formats(&tc->connector.display_info,
&bus_format, 1);
tc->connector.display_info.bus_flags =
DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_PIXDATA_NEGEDGE |
DRM_BUS_FLAG_SYNC_NEGEDGE;
drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
......
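The tc_connector_mode_valid() hunk above rejects modes the configured link cannot carry: the required bandwidth is the pixel clock times 24 bpp divided by 8, and the available bandwidth is lanes times the per-lane link rate, both in the kHz-based units the driver already uses. Below is a standalone sketch of that comparison; the lane counts and link rates in main() are illustrative values, not taken from the commit.

#include <stdint.h>
#include <stdio.h>

/* Mirrors tc_connector_mode_valid(): a mode is rejected when its DPI
 * clock exceeds 154 MHz, or when the payload it needs exceeds what the
 * trained DP link offers. Units follow the driver: mode clocks and link
 * rates are both in kHz, bits_per_pixel is fixed at 24 (RGB888). */
static int mode_ok(uint32_t mode_clock_khz, uint32_t num_lanes,
                   uint32_t link_rate_khz)
{
    const uint32_t bits_per_pixel = 24;
    uint32_t req, avail;

    if (mode_clock_khz > 154000)   /* DPI interface clock limit */
        return 0;

    req = mode_clock_khz * bits_per_pixel / 8;
    avail = num_lanes * link_rate_khz;

    return req <= avail;
}

int main(void)
{
    /* Example: 1080p60 (148.5 MHz pixel clock) fits on 2 lanes at
     * 270000 kHz, but not on a single 162000 kHz lane. */
    printf("2 lanes @ 2.7 GHz: %s\n",
           mode_ok(148500, 2, 270000) ? "OK" : "rejected");
    printf("1 lane  @ 1.62 GHz: %s\n",
           mode_ok(148500, 1, 162000) ? "OK" : "rejected");
    return 0;
}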
@@ -616,7 +616,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
......
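In the kernel, __must_check expands to the compiler's warn_unused_result attribute, so after the header change above any caller that silently drops the return value of drm_dp_mst_topology_mgr_resume() gets a build warning. Below is a tiny out-of-tree illustration of the mechanism; the function body and its failure value are made up for the example.

#include <stdio.h>

#define __must_check __attribute__((warn_unused_result))

/* Stand-in for drm_dp_mst_topology_mgr_resume(); always "fails" here just
 * to exercise the error path. */
static int __must_check resume_topology(void)
{
    return -1;
}

int main(void)
{
    /* resume_topology();   <- ignoring the result now triggers a
     *                         -Wunused-result warning at compile time */
    if (resume_topology() < 0)
        printf("resume failed, tear down MST and re-probe\n");
    return 0;
}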