Commit e5e4e223 authored by Huang Rui, committed by Alex Deucher

drm/amd/powerplay: add interface to get clock by type with latency for display (v2)

This patch adds a get_clock_by_type_with_latency interface; the display
code will use it to query the current clocks along with their latencies.

v2: fix the missed mutex lock before return.
Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6ec82684
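Before the diff, a rough picture of what the new interface provides: for one clock domain, the display side receives every DPM level together with the latency of switching to it. The sketch below is illustrative only and is not the driver's actual API; the type and function names (clock_with_latency, get_clock_levels_by_type_with_latency, MAX_LEVELS) and the sample values are simplified stand-ins.

/*
 * Minimal stand-alone sketch of a "clock levels with latency" query.
 * Not part of the patch; names and values are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_LEVELS 8

enum clock_type { CLK_SYS, CLK_MEM, CLK_DCEF, CLK_SOC };

struct clock_with_latency {
        unsigned int clocks_in_khz;
        unsigned int latency_in_us;
};

struct clock_levels_with_latency {
        unsigned int num_levels;
        struct clock_with_latency data[MAX_LEVELS];
};

/* Stand-in for the driver call; a real driver fills this from its DPM tables. */
static bool get_clock_levels_by_type_with_latency(enum clock_type type,
                                                  struct clock_levels_with_latency *out)
{
        (void)type; /* a real implementation would pick the DPM table for this domain */
        out->num_levels = 2;
        out->data[0] = (struct clock_with_latency){ .clocks_in_khz = 300000, .latency_in_us = 10 };
        out->data[1] = (struct clock_with_latency){ .clocks_in_khz = 600000, .latency_in_us = 4 };
        return true;
}

int main(void)
{
        struct clock_levels_with_latency levels = { 0 };

        if (get_clock_levels_by_type_with_latency(CLK_MEM, &levels))
                for (unsigned int i = 0; i < levels.num_levels; i++)
                        printf("level %u: %u kHz, %u us\n", i,
                               levels.data[i].clocks_in_khz,
                               levels.data[i].latency_in_us);
        return 0;
}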
@@ -392,14 +392,21 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
 	void *pp_handle = adev->powerplay.pp_handle;
 	struct pp_clock_levels_with_latency pp_clks = { 0 };
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	int ret;
+
+	if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
+		ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
+				dc_to_pp_clock_type(clk_type),
+				&pp_clks);
+		if (ret)
+			return false;
+	} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
+		if (smu_get_clock_by_type_with_latency(&adev->smu,
+				dc_to_pp_clock_type(clk_type),
+				&pp_clks))
+			return false;
+	}
 
-	if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
-		return false;
-
-	if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
-			dc_to_pp_clock_type(clk_type),
-			&pp_clks))
-		return false;
 
 	pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
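A side note on the hunk above, not part of the patch: the DC helper now prefers the legacy powerplay pp_funcs callback and only falls back to the new smu_context path when that callback is absent. The following is a minimal, self-contained sketch of that dispatch shape under simplified stand-in types; legacy_pp_funcs, smu_ppt_funcs and query_clocks are hypothetical names, not the driver's.

/*
 * Sketch of the "legacy powerplay first, SMU fallback second" dispatch.
 * All names here are simplified stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>

struct clocks;                                  /* opaque result buffer */

typedef int (*get_clocks_fn)(struct clocks *c); /* 0 on success */

struct legacy_pp_funcs { get_clocks_fn get_clock_by_type_with_latency; };
struct smu_ppt_funcs   { get_clocks_fn get_clock_by_type_with_latency; };

static bool query_clocks(const struct legacy_pp_funcs *pp,
                         const struct smu_ppt_funcs *smu,
                         struct clocks *out)
{
        if (pp && pp->get_clock_by_type_with_latency) {
                if (pp->get_clock_by_type_with_latency(out))
                        return false;           /* legacy path failed */
        } else if (smu && smu->get_clock_by_type_with_latency) {
                if (smu->get_clock_by_type_with_latency(out))
                        return false;           /* SMU path failed */
        }
        /* As in the patch: with no backend at all, the call falls through. */
        return true;
}

int main(void)
{
        /* No backend registered: the query still reports success, mirroring
         * the behaviour of the patched DC helper. */
        return query_clocks(NULL, NULL, NULL) ? 0 : 1;
}

Checking the function pointer before each call keeps the DC code agnostic of which powerplay backend a given ASIC registered.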
@@ -224,6 +224,11 @@ struct pptable_funcs {
 	int (*populate_umd_state_clk)(struct smu_context *smu);
 	int (*print_clk_levels)(struct smu_context *smu, enum pp_clock_type type, char *buf);
 	int (*force_clk_levels)(struct smu_context *smu, enum pp_clock_type type, uint32_t mask);
+	int (*get_clock_by_type_with_latency)(struct smu_context *smu,
+					      enum amd_pp_clock_type type,
+					      struct
+					      pp_clock_levels_with_latency
+					      *clocks);
 };
 
 struct smu_funcs
@@ -379,6 +384,9 @@ struct smu_funcs
 	((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0)
 #define smu_get_max_high_clocks(smu, clocks) \
 	((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0)
+#define smu_get_clock_by_type_with_latency(smu, type, clocks) \
+	((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (type), (clocks)) : 0)
+
 extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
 				   uint16_t *size, uint8_t *frev, uint8_t *crev,
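The new wrapper macro above follows the same guard pattern as the other smu_* macros, except that it dispatches through smu->ppt_funcs (the ASIC-specific pptable_funcs table) rather than smu->funcs: if the backend did not fill in the callback, the wrapper silently reports success. Below is an illustrative, self-contained sketch of that pattern, not the driver's code; ctx, ops and vega20_like_impl are stand-in names.

/*
 * Sketch of the NULL-checked macro dispatch used by the smu_* wrappers.
 * Names are simplified stand-ins for smu_context/pptable_funcs.
 */
#include <stdio.h>

struct ctx;
struct ops {
        int (*get_clock_by_type_with_latency)(struct ctx *c, int type, void *clocks);
};
struct ctx { const struct ops *ppt_funcs; };

#define ctx_get_clock_by_type_with_latency(c, type, clocks) \
        ((c)->ppt_funcs->get_clock_by_type_with_latency ? \
         (c)->ppt_funcs->get_clock_by_type_with_latency((c), (type), (clocks)) : 0)

static int vega20_like_impl(struct ctx *c, int type, void *clocks)
{
        (void)c; (void)type; (void)clocks;
        return 0;
}

int main(void)
{
        static const struct ops with    = { .get_clock_by_type_with_latency = vega20_like_impl };
        static const struct ops without = { 0 };
        struct ctx a = { .ppt_funcs = &with };
        struct ctx b = { .ppt_funcs = &without };

        /* Both calls "succeed"; only the first actually reaches a backend. */
        printf("%d %d\n", ctx_get_clock_by_type_with_latency(&a, 0, NULL),
                          ctx_get_clock_by_type_with_latency(&b, 0, NULL));
        return 0;
}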
@@ -782,6 +782,44 @@ static int vega20_force_clk_levels(struct smu_context *smu,
 	return 0;
 }
 
+static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
+						 enum amd_pp_clock_type type,
+						 struct pp_clock_levels_with_latency *clocks)
+{
+	int ret;
+	struct vega20_single_dpm_table *single_dpm_table;
+	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+	struct vega20_dpm_table *dpm_table = NULL;
+
+	dpm_table = smu_dpm->dpm_context;
+
+	mutex_lock(&smu->mutex);
+
+	switch (type) {
+	case amd_pp_sys_clock:
+		single_dpm_table = &(dpm_table->gfx_table);
+		ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+		break;
+	case amd_pp_mem_clock:
+		single_dpm_table = &(dpm_table->mem_table);
+		ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+		break;
+	case amd_pp_dcef_clock:
+		single_dpm_table = &(dpm_table->dcef_table);
+		ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+		break;
+	case amd_pp_soc_clock:
+		single_dpm_table = &(dpm_table->soc_table);
+		ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&smu->mutex);
+
+	return ret;
+}
+
 static const struct pptable_funcs vega20_ppt_funcs = {
 	.alloc_dpm_context = vega20_allocate_dpm_context,
 	.store_powerplay_table = vega20_store_powerplay_table,
@@ -794,7 +832,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.populate_umd_state_clk = vega20_populate_umd_state_clk,
 	.print_clk_levels = vega20_print_clk_levels,
 	.force_clk_levels = vega20_force_clk_levels,
+	.get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
 };
 
 void vega20_set_ppt_funcs(struct smu_context *smu)
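The vega20 implementation above selects one DPM table per clock domain, reads it while holding the smu mutex, and returns -EINVAL for domains it does not export; the v2 note in the commit message refers to releasing that mutex on every return path. The sketch below models the same shape with simplified stand-in types (smu, dpm_table, copy_table are hypothetical, and the real callback also reports per-level latencies via vega20_get_clk_table, which is abstracted away here).

/*
 * Sketch of a table-driven switch under a lock, in the shape of
 * vega20_get_clock_by_type_with_latency(). Not the driver's code.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum clk_type { CLK_SYS, CLK_MEM, CLK_DCEF, CLK_SOC, CLK_OTHER };

struct dpm_table { unsigned int levels[4]; unsigned int count; };

struct smu {
        pthread_mutex_t lock;
        struct dpm_table gfx_table, mem_table, dcef_table, soc_table;
};

static int copy_table(const struct dpm_table *t, struct dpm_table *out)
{
        *out = *t;
        return 0;
}

static int get_clock_by_type_with_latency(struct smu *smu, enum clk_type type,
                                          struct dpm_table *out)
{
        const struct dpm_table *src;
        int ret;

        pthread_mutex_lock(&smu->lock);

        switch (type) {
        case CLK_SYS:  src = &smu->gfx_table;  break;
        case CLK_MEM:  src = &smu->mem_table;  break;
        case CLK_DCEF: src = &smu->dcef_table; break;
        case CLK_SOC:  src = &smu->soc_table;  break;
        default:       src = NULL;             break;
        }
        ret = src ? copy_table(src, out) : -EINVAL;

        /* As in v2 of the patch: the unlock happens on every path before returning. */
        pthread_mutex_unlock(&smu->lock);
        return ret;
}

int main(void)
{
        struct smu smu = { .lock = PTHREAD_MUTEX_INITIALIZER,
                           .mem_table = { .levels = { 300, 600, 800, 1000 }, .count = 4 } };
        struct dpm_table out;

        printf("mem: %d, other: %d\n",
               get_clock_by_type_with_latency(&smu, CLK_MEM, &out),
               get_clock_by_type_with_latency(&smu, CLK_OTHER, &out));
        return 0;
}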