Commit a81ac299 authored by Dave Airlie

Merge tag 'amd-drm-fixes-5.10-2020-12-09' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

amd-drm-fixes-5.10-2020-12-09:

amdgpu:
- Fan fix for CI asics
- Fix a warning in possible_crtcs
- Build fix for when debugfs is disabled
- Display overflow fix
- Display watermark fixes for Renoir
- SDMA 5.2 fix
- Stolen vga memory regression fix
- Power profile fixes
- Fix a regression from removal of GEM and PRIME callbacks

amdkfd:
- Fix a memory leak in dmabuf import

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201210034848.18108-1-alexander.deucher@amd.com
parents 0477e928 c5b58c8c
@@ -459,6 +459,7 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
+struct drm_gem_object *gobj;
int ret;
memset(&bp, 0, sizeof(bp));
@@ -469,17 +470,20 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
bp.type = ttm_bo_type_sg;
bp.resv = resv;
dma_resv_lock(resv, NULL);
-ret = amdgpu_bo_create(adev, &bp, &bo);
+ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
+AMDGPU_GEM_DOMAIN_CPU,
+0, ttm_bo_type_sg, resv, &gobj);
if (ret)
goto error;
+bo = gem_to_amdgpu_bo(gobj);
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
dma_resv_unlock(resv);
-return &bo->tbo.base;
+return gobj;
error:
dma_resv_unlock(resv);
...
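Aside, not part of the patch: gem_to_amdgpu_bo() used above is, roughly, a container_of() walk from the embedded GEM object back to the owning amdgpu BO (see amdgpu_object.h), which is why returning gobj is equivalent to the old return &bo->tbo.base. A minimal sketch of the assumed type relationship:

struct amdgpu_bo {
        /* ... */
        struct ttm_buffer_object tbo;   /* tbo.base is the embedded struct drm_gem_object */
        /* ... */
};

/* Recover the amdgpu_bo that embeds a given GEM object. */
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)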
@@ -66,26 +66,12 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
bp.type = type;
bp.resv = resv;
bp.preferred_domain = initial_domain;
-retry:
bp.flags = flags;
bp.domain = initial_domain;
r = amdgpu_bo_create(adev, &bp, &bo);
-if (r) {
-if (r != -ERESTARTSYS) {
-if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
-flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-goto retry;
-}
-if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
-initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
-goto retry;
-}
-DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
-size, initial_domain, alignment, r);
-}
-return r;
-}
+if (r)
+return r;
*obj = &bo->tbo.base;
return 0;
@@ -225,7 +211,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
uint64_t size = args->in.bo_size;
struct dma_resv *resv = NULL;
struct drm_gem_object *gobj;
-uint32_t handle;
+uint32_t handle, initial_domain;
int r;
/* reject invalid gem flags */
@@ -269,9 +255,28 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
resv = vm->root.base.bo->tbo.base.resv;
}
+retry:
+initial_domain = (u32)(0xffffffff & args->in.domains);
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
-(u32)(0xffffffff & args->in.domains),
+initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
+if (r) {
+if (r != -ERESTARTSYS) {
+if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+goto retry;
+}
+if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+goto retry;
+}
+DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+size, initial_domain, args->in.alignment, r);
+}
+return r;
+}
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
if (!r) {
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
...
@@ -499,6 +499,9 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
else
size = amdgpu_gmc_get_vbios_fb_size(adev);
+if (adev->mman.keep_stolen_vga_memory)
+size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
/* set to 0 if the pre-OS buffer uses up most of vram */
if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
size = 0;
...
@@ -1172,7 +1172,7 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
con->dir, &con->disable_ras_err_cnt_harvest);
}
-void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
+static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
struct ras_fs_if *head)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -1194,7 +1194,6 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
-#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
struct ras_fs_if fs_info;
@@ -1203,7 +1202,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
* it won't be called in resume path, no need to check
* suspend and gpu reset status
*/
-if (!con)
+if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
return;
amdgpu_ras_debugfs_create_ctrl_node(adev);
@@ -1217,10 +1216,9 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_create(adev, &fs_info);
}
}
-#endif
}
-void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
+static void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head)
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
@@ -1234,7 +1232,6 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
{
-#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj, *tmp;
@@ -1243,7 +1240,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
}
con->dir = NULL;
-#endif
}
/* debugfs end */
@@ -1291,7 +1287,8 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
-amdgpu_ras_debugfs_remove_all(adev);
+if (IS_ENABLED(CONFIG_DEBUG_FS))
+amdgpu_ras_debugfs_remove_all(adev);
amdgpu_ras_sysfs_remove_all(adev);
return 0;
}
...
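Aside, not part of the patch: the change above trades #ifdef CONFIG_DEBUG_FS guards for runtime IS_ENABLED() checks. IS_ENABLED(CONFIG_DEBUG_FS) folds to a constant 0 or 1, so the debugfs calls are always compiled (avoiding the stub mismatch that broke the build) and are dropped as dead code when debugfs is off. A minimal sketch mirroring amdgpu_ras_fs_fini() above:

static int example_ras_fs_fini(struct amdgpu_device *adev)
{
        /* Compiled in both configurations, eliminated when CONFIG_DEBUG_FS=n. */
        if (IS_ENABLED(CONFIG_DEBUG_FS))
                amdgpu_ras_debugfs_remove_all(adev);

        amdgpu_ras_sysfs_remove_all(adev);
        return 0;
}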
@@ -607,14 +607,8 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head);
-void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
-struct ras_fs_if *head);
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
-void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
-struct ras_common_if *head);
int amdgpu_ras_error_query(struct amdgpu_device *adev,
struct ras_query_if *info);
...
@@ -186,7 +186,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
-err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
+err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
if (err)
goto out;
}
...
@@ -1736,6 +1736,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
}
mutex_unlock(&p->mutex);
+dma_buf_put(dmabuf);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1745,6 +1746,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
err_unlock:
mutex_unlock(&p->mutex);
+dma_buf_put(dmabuf);
return r;
}
...
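Aside, not part of the patch: the leak fix follows the standard dma-buf reference rule: dma_buf_get() takes a reference on the buffer behind the fd, and every exit path has to balance it with dma_buf_put(), including the success path. A simplified sketch of the pattern (do_import() is a hypothetical stand-in for the GPUVM import):

#include <linux/dma-buf.h>
#include <linux/err.h>

static int import_dmabuf_example(int dmabuf_fd)
{
        struct dma_buf *dmabuf;
        int r;

        dmabuf = dma_buf_get(dmabuf_fd);        /* takes a reference */
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        r = do_import(dmabuf);                  /* hypothetical helper */

        dma_buf_put(dmabuf);                    /* drop it on success and on error */
        return r;
}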
@@ -1058,9 +1058,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
-/* Update the actual used number of crtc */
-adev->mode_info.num_crtc = adev->dm.display_indexes_num;
/* create fake encoders for MST */
dm_dp_create_fake_mst_encoders(adev);
@@ -3251,6 +3248,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
+dm->display_indexes_num = dm->dc->caps.max_streams;
+/* Update the actual used number of crtc */
+adev->mode_info.num_crtc = adev->dm.display_indexes_num;
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
DRM_ERROR("DM: Failed to initialize mode config\n");
@@ -3312,8 +3313,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
-dm->display_indexes_num = dm->dc->caps.max_streams;
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
...
@@ -579,7 +579,7 @@ static struct clk_bw_params rn_bw_params = {
};
-static struct wm_table ddr4_wm_table = {
+static struct wm_table ddr4_wm_table_gs = {
.entries = {
{
.wm_inst = WM_A,
@@ -616,7 +616,7 @@ static struct wm_table ddr4_wm_table = {
}
};
-static struct wm_table lpddr4_wm_table = {
+static struct wm_table lpddr4_wm_table_gs = {
.entries = {
{
.wm_inst = WM_A,
@@ -690,6 +690,80 @@ static struct wm_table lpddr4_wm_table_with_disabled_ppt = {
}
};
static struct wm_table ddr4_wm_table_rn = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9.09,
.sr_enter_plus_exit_time_us = 10.14,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
}
};
static struct wm_table lpddr4_wm_table_rn = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 7.32,
.sr_enter_plus_exit_time_us = 8.38,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 9.82,
.sr_enter_plus_exit_time_us = 11.196,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 9.89,
.sr_enter_plus_exit_time_us = 11.24,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 9.748,
.sr_enter_plus_exit_time_us = 11.102,
.valid = true,
},
}
};
static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
int i;
@@ -771,6 +845,11 @@ void rn_clk_mgr_construct(
struct dc_debug_options *debug = &ctx->dc->debug;
struct dpm_clocks clock_table = { 0 };
enum pp_smu_status status = 0;
+int is_green_sardine = 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
+#endif
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn21_funcs;
@@ -811,10 +890,16 @@ void rn_clk_mgr_construct(
if (clk_mgr->periodic_retraining_disabled) {
rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt;
} else {
-rn_bw_params.wm_table = lpddr4_wm_table;
+if (is_green_sardine)
+rn_bw_params.wm_table = lpddr4_wm_table_gs;
+else
+rn_bw_params.wm_table = lpddr4_wm_table_rn;
}
} else {
-rn_bw_params.wm_table = ddr4_wm_table;
+if (is_green_sardine)
+rn_bw_params.wm_table = ddr4_wm_table_gs;
+else
+rn_bw_params.wm_table = ddr4_wm_table_rn;
}
/* Saved clocks configured at boot for debug purposes */
rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
...
@@ -3394,10 +3394,13 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
+struct fixed31_32 link_bw_kbps;
if (timing->flags.DSC) {
-kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
-kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
+link_bw_kbps = dc_fixpt_from_int(timing->pix_clk_100hz);
+link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160);
+link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, timing->dsc_cfg.bits_per_pixel);
+kbps = dc_fixpt_ceil(link_bw_kbps);
return kbps;
}
...
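Aside, not part of the patch: the DSC branch overflowed because pix_clk_100hz * dsc_cfg.bits_per_pixel no longer fits in 32 bits for large modes (an 8K-class pixel clock of roughly 24,000,000 in 100 Hz units times a bits_per_pixel value of 192 in 1/16 bpp units is about 4.6e9, past the u32 limit), so the divide by 160 ran on a wrapped value. The fixed-point sequence above is equivalent to this hedged 64-bit sketch:

#include <stdint.h>

/* pix_clk is in 100 Hz units and bits_per_pixel in 1/16 bpp units, so
 * kbps = ceil(pix_clk_100hz * bpp_x16 / 160). */
static uint32_t dsc_bandwidth_kbps(uint32_t pix_clk_100hz, uint32_t bpp_x16)
{
        uint64_t product = (uint64_t)pix_clk_100hz * bpp_x16;  /* widen before dividing */

        return (uint32_t)((product + 159) / 160);               /* round up like dc_fixpt_ceil() */
}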
@@ -136,14 +136,12 @@
#define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT)
/* Workload bits */
-#define WORKLOAD_DEFAULT_BIT 0
-#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
-#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
-#define WORKLOAD_PPLIB_VIDEO_BIT 3
-#define WORKLOAD_PPLIB_VR_BIT 4
-#define WORKLOAD_PPLIB_COMPUTE_BIT 5
-#define WORKLOAD_PPLIB_CUSTOM_BIT 6
-#define WORKLOAD_PPLIB_COUNT 7
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
+#define WORKLOAD_PPLIB_VIDEO_BIT 2
+#define WORKLOAD_PPLIB_VR_BIT 3
+#define WORKLOAD_PPLIB_COMPUTE_BIT 4
+#define WORKLOAD_PPLIB_CUSTOM_BIT 5
+#define WORKLOAD_PPLIB_COUNT 6
typedef struct {
/* MP1_EXT_SCRATCH0 */
...
@@ -24,6 +24,8 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/pci.h>
#include <drm/amdgpu_drm.h>
#include "processpptables.h"
#include <atom-types.h>
@@ -984,6 +986,8 @@ static int init_thermal_controller(
struct pp_hwmgr *hwmgr,
const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
+struct amdgpu_device *adev = hwmgr->adev;
hwmgr->thermal_controller.ucType =
powerplay_table->sThermalController.ucType;
hwmgr->thermal_controller.ucI2cLine =
@@ -1008,7 +1012,104 @@ static int init_thermal_controller(
ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
PHM_PlatformCaps_ThermalController);
-hwmgr->thermal_controller.use_hw_fan_control = 1;
+if (powerplay_table->usTableSize >= sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) {
const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 =
(const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table;
if (0 == le16_to_cpu(powerplay_table3->usFanTableOffset)) {
hwmgr->thermal_controller.use_hw_fan_control = 1;
return 0;
} else {
const ATOM_PPLIB_FANTABLE *fan_table =
(const ATOM_PPLIB_FANTABLE *)(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table3->usFanTableOffset));
if (1 <= fan_table->ucFanTableFormat) {
hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst =
fan_table->ucTHyst;
hwmgr->thermal_controller.advanceFanControlParameters.usTMin =
le16_to_cpu(fan_table->usTMin);
hwmgr->thermal_controller.advanceFanControlParameters.usTMed =
le16_to_cpu(fan_table->usTMed);
hwmgr->thermal_controller.advanceFanControlParameters.usTHigh =
le16_to_cpu(fan_table->usTHigh);
hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
le16_to_cpu(fan_table->usPWMMin);
hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed =
le16_to_cpu(fan_table->usPWMMed);
hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh =
le16_to_cpu(fan_table->usPWMHigh);
hwmgr->thermal_controller.advanceFanControlParameters.usTMax = 10900;
hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay = 100000;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
}
if (2 <= fan_table->ucFanTableFormat) {
const ATOM_PPLIB_FANTABLE2 *fan_table2 =
(const ATOM_PPLIB_FANTABLE2 *)(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table3->usFanTableOffset));
hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
le16_to_cpu(fan_table2->usTMax);
}
if (3 <= fan_table->ucFanTableFormat) {
const ATOM_PPLIB_FANTABLE3 *fan_table3 =
(const ATOM_PPLIB_FANTABLE3 *) (((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table3->usFanTableOffset));
hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode =
fan_table3->ucFanControlMode;
if ((3 == fan_table->ucFanTableFormat) &&
(0x67B1 == adev->pdev->device))
hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM =
47;
else
hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM =
le16_to_cpu(fan_table3->usFanPWMMax);
hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity =
4836;
hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
le16_to_cpu(fan_table3->usFanOutputSensitivity);
}
if (6 <= fan_table->ucFanTableFormat) {
const ATOM_PPLIB_FANTABLE4 *fan_table4 =
(const ATOM_PPLIB_FANTABLE4 *)(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table3->usFanTableOffset));
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM);
hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM =
le16_to_cpu(fan_table4->usFanRPMMax);
}
if (7 <= fan_table->ucFanTableFormat) {
const ATOM_PPLIB_FANTABLE5 *fan_table5 =
(const ATOM_PPLIB_FANTABLE5 *)(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table3->usFanTableOffset));
if (0x67A2 == adev->pdev->device ||
0x67A9 == adev->pdev->device ||
0x67B9 == adev->pdev->device) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_GeminiRegulatorFanControlSupport);
hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentLow =
le16_to_cpu(fan_table5->usFanCurrentLow);
hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentHigh =
le16_to_cpu(fan_table5->usFanCurrentHigh);
hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMLow =
le16_to_cpu(fan_table5->usFanRPMLow);
hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMHigh =
le16_to_cpu(fan_table5->usFanRPMHigh);
}
}
}
}
return 0;
}
...
@@ -1297,15 +1297,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
int pplib_workload = 0;
switch (power_profile) {
-case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
-pplib_workload = WORKLOAD_DEFAULT_BIT;
-break;
case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
break;
-case PP_SMC_POWER_PROFILE_POWERSAVING:
-pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
-break;
case PP_SMC_POWER_PROFILE_VIDEO:
pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
break;
@@ -1315,6 +1309,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
case PP_SMC_POWER_PROFILE_COMPUTE:
pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
break;
+case PP_SMC_POWER_PROFILE_CUSTOM:
+pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
+break;
}
return pplib_workload;
...
@@ -217,7 +217,7 @@ static struct cmn2asic_mapping sienna_cichlid_workload_map[PP_SMC_POWER_PROFILE_
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
-WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT),
+WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
...