Commit ad44518a authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'drm-fixes-2021-11-19' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "This week's fixes, pretty quiet, about right for rc2. amdgpu is the
  bulk of them but the scheduler ones have been reported in a few places
  I think.

  Otherwise just some minor i915 fixes and a few other scattered around:

  scheduler:
   - two refcounting fixes

  cma-helper:
   - use correct free path for noncoherent

  efifb:
   - probing fix

  amdgpu:
   - Better debugging info for SMU msgs
   - Better error reporting when adding IP blocks
   - Fix UVD powergating regression on CZ
   - Clock reporting fix for navi1x
   - OLED panel backlight fix
   - Fix scaling on VGA/DVI for non-DC display code
   - Fix GFXCLK handling for RGP on some APUs
   - fix potential memory leak

  amdkfd:
   - GPU reset fix

  i915:
   - return error handling fix
   - ADL-P display fix
   - TGL DSI display clocks fix

  nouveau:
   - infoframe corruption fix

  sun4i:
   - Kconfig fix"

* tag 'drm-fixes-2021-11-19' of git://anongit.freedesktop.org/drm/drm:
  drm/amd/amdgpu: fix potential memleak
  drm/amd/amdkfd: Fix kernel panic when reset failed and been triggered again
  drm/amd/pm: add GFXCLK/SCLK clocks level print support for APUs
  drm/amdgpu: fix set scaling mode Full/Full aspect/Center not works on vga and dvi connectors
  drm/amd/display: Fix OLED brightness control on eDP
  drm/amd/pm: Remove artificial freq level on Navi1x
  drm/amd/pm: avoid duplicate powergate/ungate setting
  drm/amdgpu: add error print when failing to add IP block(v2)
  drm/amd/pm: Enhanced reporting also for a stuck command
  drm/i915/guc: fix NULL vs IS_ERR() checking
  drm/i915/dsi/xelpd: Fix the bit mask for wakeup GB
  Revert "drm/i915/tgl/dsi: Gate the ddi clocks after pll mapping"
  fbdev: Prevent probing generic drivers if a FB is already registered
  drm/scheduler: fix drm_sched_job_add_implicit_dependencies harder
  drm/scheduler: fix drm_sched_job_add_implicit_dependencies
  drm/sun4i: fix unmet dependency on RESET_CONTROLLER for PHY_SUN6I_MIPI_DPHY
  drm/cma-helper: Release non-coherent memory with dma_free_noncoherent()
  drm/nouveau: hdmigv100.c: fix corrupted HDMI Vendor InfoFrame
parents 0dc636b3 7d51040a
...@@ -827,6 +827,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector) ...@@ -827,6 +827,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
amdgpu_connector_get_edid(connector); amdgpu_connector_get_edid(connector);
ret = amdgpu_connector_ddc_get_modes(connector); ret = amdgpu_connector_ddc_get_modes(connector);
amdgpu_get_native_mode(connector);
return ret; return ret;
} }
......
...@@ -3509,6 +3509,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, ...@@ -3509,6 +3509,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->rmmio_size = pci_resource_len(adev->pdev, 2); adev->rmmio_size = pci_resource_len(adev->pdev, 2);
} }
for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
if (adev->rmmio == NULL) { if (adev->rmmio == NULL) {
return -ENOMEM; return -ENOMEM;
......
...@@ -587,6 +587,9 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) ...@@ -587,6 +587,9 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &nv_common_ip_block); amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
break; break;
default: default:
dev_err(adev->dev,
"Failed to add common ip block(GC_HWIP:0x%x)\n",
adev->ip_versions[GC_HWIP][0]);
return -EINVAL; return -EINVAL;
} }
return 0; return 0;
...@@ -619,6 +622,9 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) ...@@ -619,6 +622,9 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
break; break;
default: default:
dev_err(adev->dev,
"Failed to add gmc ip block(GC_HWIP:0x%x)\n",
adev->ip_versions[GC_HWIP][0]);
return -EINVAL; return -EINVAL;
} }
return 0; return 0;
...@@ -648,6 +654,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) ...@@ -648,6 +654,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
break; break;
default: default:
dev_err(adev->dev,
"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
adev->ip_versions[OSSSYS_HWIP][0]);
return -EINVAL; return -EINVAL;
} }
return 0; return 0;
...@@ -688,6 +697,9 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) ...@@ -688,6 +697,9 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break; break;
default: default:
dev_err(adev->dev,
"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
adev->ip_versions[MP0_HWIP][0]);
return -EINVAL; return -EINVAL;
} }
return 0; return 0;
...@@ -726,6 +738,9 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) ...@@ -726,6 +738,9 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break; break;
default: default:
dev_err(adev->dev,
"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
adev->ip_versions[MP1_HWIP][0]);
return -EINVAL; return -EINVAL;
} }
return 0; return 0;
...@@ -753,6 +768,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) ...@@ -753,6 +768,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &dm_ip_block); amdgpu_device_ip_block_add(adev, &dm_ip_block);
break; break;
default: default:
dev_err(adev->dev,
"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
adev->ip_versions[DCE_HWIP][0]);
return -EINVAL; return -EINVAL;
} }
} else if (adev->ip_versions[DCI_HWIP][0]) { } else if (adev->ip_versions[DCI_HWIP][0]) {
...@@ -763,6 +781,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) ...@@ -763,6 +781,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &dm_ip_block); amdgpu_device_ip_block_add(adev, &dm_ip_block);
break; break;
default: default:
dev_err(adev->dev,
"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
adev->ip_versions[DCI_HWIP][0]);
return -EINVAL; return -EINVAL;
} }
#endif #endif
...@@ -796,6 +817,9 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) ...@@ -796,6 +817,9 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
break; break;
default: default:
dev_err(adev->dev,
"Failed to add gfx ip block(GC_HWIP:0x%x)\n",
adev->ip_versions[GC_HWIP][0]);
return -EINVAL; return -EINVAL;
} }
return 0; return 0;
...@@ -829,6 +853,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) ...@@ -829,6 +853,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
break; break;
default: default:
dev_err(adev->dev,
"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
adev->ip_versions[SDMA0_HWIP][0]);
return -EINVAL; return -EINVAL;
} }
return 0; return 0;
...@@ -845,6 +872,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) ...@@ -845,6 +872,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
break; break;
default: default:
dev_err(adev->dev,
"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
adev->ip_versions[UVD_HWIP][0]);
return -EINVAL; return -EINVAL;
} }
switch (adev->ip_versions[VCE_HWIP][0]) { switch (adev->ip_versions[VCE_HWIP][0]) {
...@@ -855,6 +885,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) ...@@ -855,6 +885,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
break; break;
default: default:
dev_err(adev->dev,
"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
adev->ip_versions[VCE_HWIP][0]);
return -EINVAL; return -EINVAL;
} }
} else { } else {
...@@ -893,6 +926,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) ...@@ -893,6 +926,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
break; break;
default: default:
dev_err(adev->dev,
"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
adev->ip_versions[UVD_HWIP][0]);
return -EINVAL; return -EINVAL;
} }
} }
......
...@@ -386,6 +386,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) ...@@ -386,6 +386,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
"%s", "xgmi_hive_info"); "%s", "xgmi_hive_info");
if (ret) { if (ret) {
dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n"); dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
kobject_put(&hive->kobj);
kfree(hive); kfree(hive);
hive = NULL; hive = NULL;
goto pro_end; goto pro_end;
......
...@@ -1226,6 +1226,11 @@ static int stop_cpsch(struct device_queue_manager *dqm) ...@@ -1226,6 +1226,11 @@ static int stop_cpsch(struct device_queue_manager *dqm)
bool hanging; bool hanging;
dqm_lock(dqm); dqm_lock(dqm);
if (!dqm->sched_running) {
dqm_unlock(dqm);
return 0;
}
if (!dqm->is_hws_hang) if (!dqm->is_hws_hang)
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
hanging = dqm->is_hws_hang || dqm->is_resetting; hanging = dqm->is_hws_hang || dqm->is_resetting;
......
...@@ -4242,7 +4242,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) ...@@ -4242,7 +4242,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) { } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector); amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link); register_backlight_device(dm, link);
if (dm->num_of_edps)
update_connector_ext_caps(aconnector);
if (psr_feature_enabled) if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link); amdgpu_dm_set_psr_caps(link);
} }
......
...@@ -98,7 +98,8 @@ enum amd_ip_block_type { ...@@ -98,7 +98,8 @@ enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_ACP, AMD_IP_BLOCK_TYPE_ACP,
AMD_IP_BLOCK_TYPE_VCN, AMD_IP_BLOCK_TYPE_VCN,
AMD_IP_BLOCK_TYPE_MES, AMD_IP_BLOCK_TYPE_MES,
AMD_IP_BLOCK_TYPE_JPEG AMD_IP_BLOCK_TYPE_JPEG,
AMD_IP_BLOCK_TYPE_NUM,
}; };
enum amd_clockgating_state { enum amd_clockgating_state {
......
...@@ -927,6 +927,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block ...@@ -927,6 +927,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
{ {
int ret = 0; int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
dev_dbg(adev->dev, "IP block%d already in the target %s state!",
block_type, gate ? "gate" : "ungate");
return 0;
}
switch (block_type) { switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD: case AMD_IP_BLOCK_TYPE_UVD:
...@@ -979,6 +986,9 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block ...@@ -979,6 +986,9 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
break; break;
} }
if (!ret)
atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
return ret; return ret;
} }
......
...@@ -417,6 +417,12 @@ struct amdgpu_dpm { ...@@ -417,6 +417,12 @@ struct amdgpu_dpm {
enum amd_dpm_forced_level forced_level; enum amd_dpm_forced_level forced_level;
}; };
enum ip_power_state {
POWER_STATE_UNKNOWN,
POWER_STATE_ON,
POWER_STATE_OFF,
};
struct amdgpu_pm { struct amdgpu_pm {
struct mutex mutex; struct mutex mutex;
u32 current_sclk; u32 current_sclk;
...@@ -452,6 +458,8 @@ struct amdgpu_pm { ...@@ -452,6 +458,8 @@ struct amdgpu_pm {
struct i2c_adapter smu_i2c; struct i2c_adapter smu_i2c;
struct mutex smu_i2c_mutex; struct mutex smu_i2c_mutex;
struct list_head pm_attr_list; struct list_head pm_attr_list;
atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM];
}; };
#define R600_SSTU_DFLT 0 #define R600_SSTU_DFLT 0
......
...@@ -309,6 +309,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, ...@@ -309,6 +309,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
{ {
int ret = 0, size = 0; int ret = 0, size = 0;
uint32_t cur_value = 0; uint32_t cur_value = 0;
int i;
smu_cmn_get_sysfs_buf(&buf, &size); smu_cmn_get_sysfs_buf(&buf, &size);
...@@ -334,8 +335,6 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, ...@@ -334,8 +335,6 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n", size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n",
CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
break; break;
case SMU_GFXCLK:
case SMU_SCLK:
case SMU_FCLK: case SMU_FCLK:
case SMU_MCLK: case SMU_MCLK:
case SMU_SOCCLK: case SMU_SOCCLK:
...@@ -346,6 +345,25 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, ...@@ -346,6 +345,25 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
return ret; return ret;
size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value); size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
break; break;
case SMU_SCLK:
case SMU_GFXCLK:
ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
return ret;
if (cur_value == CYAN_SKILLFISH_SCLK_MAX)
i = 2;
else if (cur_value == CYAN_SKILLFISH_SCLK_MIN)
i = 0;
else
i = 1;
size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MIN,
i == 0 ? "*" : "");
size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
i == 1 ? cur_value : cyan_skillfish_sclk_default,
i == 1 ? "*" : "");
size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MAX,
i == 2 ? "*" : "");
break;
default: default:
dev_warn(smu->adev->dev, "Unsupported clock type\n"); dev_warn(smu->adev->dev, "Unsupported clock type\n");
return ret; return ret;
......
...@@ -1265,7 +1265,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, ...@@ -1265,7 +1265,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf) enum smu_clk_type clk_type, char *buf)
{ {
uint16_t *curve_settings; uint16_t *curve_settings;
int i, size = 0, ret = 0; int i, levels, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0; uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0}; uint32_t freq_values[3] = {0};
uint32_t mark_index = 0; uint32_t mark_index = 0;
...@@ -1319,14 +1319,17 @@ static int navi10_print_clk_levels(struct smu_context *smu, ...@@ -1319,14 +1319,17 @@ static int navi10_print_clk_levels(struct smu_context *smu,
freq_values[1] = cur_value; freq_values[1] = cur_value;
mark_index = cur_value == freq_values[0] ? 0 : mark_index = cur_value == freq_values[0] ? 0 :
cur_value == freq_values[2] ? 2 : 1; cur_value == freq_values[2] ? 2 : 1;
if (mark_index != 1)
freq_values[1] = (freq_values[0] + freq_values[2]) / 2;
for (i = 0; i < 3; i++) { levels = 3;
if (mark_index != 1) {
levels = 2;
freq_values[1] = freq_values[2];
}
for (i = 0; i < levels; i++) {
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i], size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i],
i == mark_index ? "*" : ""); i == mark_index ? "*" : "");
} }
} }
break; break;
case SMU_PCIE: case SMU_PCIE:
......
...@@ -683,6 +683,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, ...@@ -683,6 +683,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
int i, size = 0, ret = 0; int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0; uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false; bool cur_value_match_level = false;
uint32_t min, max;
memset(&metrics, 0, sizeof(metrics)); memset(&metrics, 0, sizeof(metrics));
...@@ -743,6 +744,13 @@ static int vangogh_print_clk_levels(struct smu_context *smu, ...@@ -743,6 +744,13 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
if (ret) if (ret)
return ret; return ret;
break; break;
case SMU_GFXCLK:
case SMU_SCLK:
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value);
if (ret) {
return ret;
}
break;
default: default:
break; break;
} }
...@@ -768,6 +776,24 @@ static int vangogh_print_clk_levels(struct smu_context *smu, ...@@ -768,6 +776,24 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
if (!cur_value_match_level) if (!cur_value_match_level)
size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value);
break; break;
case SMU_GFXCLK:
case SMU_SCLK:
min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
if (cur_value == max)
i = 2;
else if (cur_value == min)
i = 0;
else
i = 1;
size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
i == 0 ? "*" : "");
size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK,
i == 1 ? "*" : "");
size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
i == 2 ? "*" : "");
break;
default: default:
break; break;
} }
......
...@@ -697,6 +697,11 @@ static int yellow_carp_get_current_clk_freq(struct smu_context *smu, ...@@ -697,6 +697,11 @@ static int yellow_carp_get_current_clk_freq(struct smu_context *smu,
case SMU_FCLK: case SMU_FCLK:
return smu_cmn_send_smc_msg_with_param(smu, return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetFclkFrequency, 0, value); SMU_MSG_GetFclkFrequency, 0, value);
case SMU_GFXCLK:
case SMU_SCLK:
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetGfxclkFrequency, 0, value);
break;
default: default:
return -EINVAL; return -EINVAL;
} }
...@@ -967,6 +972,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, ...@@ -967,6 +972,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
{ {
int i, size = 0, ret = 0; int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0; uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
smu_cmn_get_sysfs_buf(&buf, &size); smu_cmn_get_sysfs_buf(&buf, &size);
...@@ -1005,6 +1011,27 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, ...@@ -1005,6 +1011,27 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
cur_value == value ? "*" : ""); cur_value == value ? "*" : "");
} }
break; break;
case SMU_GFXCLK:
case SMU_SCLK:
ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
goto print_clk_out;
min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
if (cur_value == max)
i = 2;
else if (cur_value == min)
i = 0;
else
i = 1;
size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
i == 0 ? "*" : "");
size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
i == 1 ? cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK,
i == 1 ? "*" : "");
size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
i == 2 ? "*" : "");
break;
default: default:
break; break;
} }
......
...@@ -24,5 +24,6 @@ ...@@ -24,5 +24,6 @@
#define __YELLOW_CARP_PPT_H__ #define __YELLOW_CARP_PPT_H__
extern void yellow_carp_set_ppt_funcs(struct smu_context *smu); extern void yellow_carp_set_ppt_funcs(struct smu_context *smu);
#define YELLOW_CARP_UMD_PSTATE_GFXCLK 1100
#endif #endif
...@@ -139,9 +139,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, ...@@ -139,9 +139,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
const char *message = smu_get_message_name(smu, msg); const char *message = smu_get_message_name(smu, msg);
switch (reg_c2pmsg_90) { switch (reg_c2pmsg_90) {
case SMU_RESP_NONE: case SMU_RESP_NONE: {
u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
u32 prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
dev_err_ratelimited(adev->dev, dev_err_ratelimited(adev->dev,
"SMU: I'm not done with your previous command!"); "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
msg_idx, prm);
}
break; break;
case SMU_RESP_OK: case SMU_RESP_OK:
/* The SMU executed the command. It completed with a /* The SMU executed the command. It completed with a
......
...@@ -210,6 +210,11 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) ...@@ -210,6 +210,11 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map); dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt); drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
} else if (cma_obj->vaddr) { } else if (cma_obj->vaddr) {
if (cma_obj->map_noncoherent)
dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
cma_obj->vaddr, cma_obj->paddr,
DMA_TO_DEVICE);
else
dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
cma_obj->vaddr, cma_obj->paddr); cma_obj->vaddr, cma_obj->paddr);
} }
......
...@@ -696,9 +696,6 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, ...@@ -696,9 +696,6 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
for_each_dsi_phy(phy, intel_dsi->phys) { for_each_dsi_phy(phy, intel_dsi->phys) {
if (DISPLAY_VER(dev_priv) >= 12)
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
else
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
} }
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
...@@ -1135,8 +1132,6 @@ static void ...@@ -1135,8 +1132,6 @@ static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state) const struct intel_crtc_state *crtc_state)
{ {
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
/* step 4a: power up all lanes of the DDI used by DSI */ /* step 4a: power up all lanes of the DDI used by DSI */
gen11_dsi_power_up_lanes(encoder); gen11_dsi_power_up_lanes(encoder);
...@@ -1162,7 +1157,6 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, ...@@ -1162,7 +1157,6 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
gen11_dsi_configure_transcoder(encoder, crtc_state); gen11_dsi_configure_transcoder(encoder, crtc_state);
/* Step 4l: Gate DDI clocks */ /* Step 4l: Gate DDI clocks */
if (DISPLAY_VER(dev_priv) == 11)
gen11_dsi_gate_clocks(encoder); gen11_dsi_gate_clocks(encoder);
} }
...@@ -1271,7 +1265,8 @@ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder) ...@@ -1271,7 +1265,8 @@ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder)
if (DISPLAY_VER(i915) == 13) { if (DISPLAY_VER(i915) == 13) {
for_each_dsi_port(port, intel_dsi->ports) for_each_dsi_port(port, intel_dsi->ports)
intel_de_rmw(i915, TGL_DSI_CHKN_REG(port), intel_de_rmw(i915, TGL_DSI_CHKN_REG(port),
TGL_DSI_CHKN_LSHS_GB, 0x4); TGL_DSI_CHKN_LSHS_GB_MASK,
TGL_DSI_CHKN_LSHS_GB(4));
} }
} }
......
...@@ -3080,8 +3080,8 @@ guc_create_parallel(struct intel_engine_cs **engines, ...@@ -3080,8 +3080,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
ce = intel_engine_create_virtual(siblings, num_siblings, ce = intel_engine_create_virtual(siblings, num_siblings,
FORCE_VIRTUAL); FORCE_VIRTUAL);
if (!ce) { if (IS_ERR(ce)) {
err = ERR_PTR(-ENOMEM); err = ERR_CAST(ce);
goto unwind; goto unwind;
} }
......
...@@ -11717,7 +11717,9 @@ enum skl_power_gate { ...@@ -11717,7 +11717,9 @@ enum skl_power_gate {
#define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \ #define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \
_TGL_DSI_CHKN_REG_0, \ _TGL_DSI_CHKN_REG_0, \
_TGL_DSI_CHKN_REG_1) _TGL_DSI_CHKN_REG_1)
#define TGL_DSI_CHKN_LSHS_GB REG_GENMASK(15, 12) #define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12)
#define TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \
(byte_clocks))
/* Display Stream Splitter Control */ /* Display Stream Splitter Control */
#define DSS_CTL1 _MMIO(0x67400) #define DSS_CTL1 _MMIO(0x67400)
......
...@@ -62,7 +62,6 @@ gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet, ...@@ -62,7 +62,6 @@ gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header); nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header);
nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low); nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low);
nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high); nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high);
nvkm_wr32(device, 0x6f0110 + hdmi, 0x00000000);
nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000);
nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000);
nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000); nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000);
......
...@@ -704,10 +704,14 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, ...@@ -704,10 +704,14 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
int ret; int ret;
dma_resv_for_each_fence(&cursor, obj->resv, write, fence) { dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
/* Make sure to grab an additional ref on the added fence */
dma_fence_get(fence);
ret = drm_sched_job_add_dependency(job, fence); ret = drm_sched_job_add_dependency(job, fence);
if (ret) if (ret) {
dma_fence_put(fence);
return ret; return ret;
} }
}
return 0; return 0;
} }
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies); EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
......
...@@ -46,6 +46,7 @@ config DRM_SUN6I_DSI ...@@ -46,6 +46,7 @@ config DRM_SUN6I_DSI
default MACH_SUN8I default MACH_SUN8I
select CRC_CCITT select CRC_CCITT
select DRM_MIPI_DSI select DRM_MIPI_DSI
select RESET_CONTROLLER
select PHY_SUN6I_MIPI_DPHY select PHY_SUN6I_MIPI_DPHY
help help
Choose this option if you want have an Allwinner SoC with Choose this option if you want have an Allwinner SoC with
......
...@@ -351,6 +351,17 @@ static int efifb_probe(struct platform_device *dev) ...@@ -351,6 +351,17 @@ static int efifb_probe(struct platform_device *dev)
char *option = NULL; char *option = NULL;
efi_memory_desc_t md; efi_memory_desc_t md;
/*
* Generic drivers must not be registered if a framebuffer exists.
* If a native driver was probed, the display hardware was already
* taken and attempting to use the system framebuffer is dangerous.
*/
if (num_registered_fb > 0) {
dev_err(&dev->dev,
"efifb: a framebuffer is already registered\n");
return -EINVAL;
}
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled) if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
return -ENODEV; return -ENODEV;
......
...@@ -407,6 +407,17 @@ static int simplefb_probe(struct platform_device *pdev) ...@@ -407,6 +407,17 @@ static int simplefb_probe(struct platform_device *pdev)
struct simplefb_par *par; struct simplefb_par *par;
struct resource *mem; struct resource *mem;
/*
* Generic drivers must not be registered if a framebuffer exists.
* If a native driver was probed, the display hardware was already
* taken and attempting to use the system framebuffer is dangerous.
*/
if (num_registered_fb > 0) {
dev_err(&pdev->dev,
"simplefb: a framebuffer is already registered\n");
return -EINVAL;
}
if (fb_get_options("simplefb", NULL)) if (fb_get_options("simplefb", NULL))
return -ENODEV; return -ENODEV;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment