Commit cf85f5de authored by Linus Torvalds

Merge tag 'drm-fixes-2020-09-04' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Not much going on this week, nouveau has a display hw bug workaround,
  amdgpu has some PM fixes and CIK regression fixes, one single radeon
  PLL fix, and a couple of i915 display fixes.

  amdgpu:
   - Fix for 32bit systems
   - SW CTF fix
   - Update for Sienna Cichlid
   - CIK bug fixes

  radeon:
   - PLL fix

  i915:
   - Clang build warning fix
   - HDCP fixes

  nouveau:
   - display fixes"

* tag 'drm-fixes-2020-09-04' of git://anongit.freedesktop.org/drm/drm:
  drm/nouveau/kms/nv50-gp1xx: add WAR for EVO push buffer HW bug
  drm/nouveau/kms/nv50-gp1xx: disable notifies again after core update
  drm/nouveau/kms/nv50-: add some whitespace before debug message
  drm/nouveau/kms/gv100-: Include correct push header in crcc37d.c
  drm/radeon: Prefer lower feedback dividers
  drm/amdgpu: Fix bug in reporting voltage for CIK
  drm/amdgpu: Specify get_argument function for ci_smu_funcs
  drm/amd/pm: enable MP0 DPM for sienna_cichlid
  drm/amd/pm: avoid false alarm due to confusing softwareshutdowntemp setting
  drm/amd/pm: fix is_dpm_running() run error on 32bit system
  drm/i915: Clear the repeater bit on HDCP disable
  drm/i915: Fix sha_text population code
  drm/i915/display: Ensure that ret is always initialized in icl_combo_phy_verify_state
parents acf69c94 d37d5692
@@ -1840,10 +1840,14 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
 {
 	int ret = 0;
 	uint32_t feature_mask[2];
-	unsigned long feature_enabled;
+	uint64_t feature_enabled;
 
 	ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
-	feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
-			   ((uint64_t)feature_mask[1] << 32));
+	if (ret)
+		return false;
+
+	feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
+
 	return !!(feature_enabled & SMC_DPM_FEATURE);
 }
...
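For context on the 32-bit fix above: on a 32-bit kernel, unsigned long is only 32 bits wide, so the old cast silently dropped the upper half of the enabled-feature mask. A minimal standalone sketch of the truncation and of the fixed 64-bit assembly (not driver code; the mask values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up enabled-feature mask: low word and high word. */
	uint32_t feature_mask[2] = { 0x0000000fu, 0x00000001u };

	/* Old approach: on 32-bit kernels unsigned long is 32 bits, so the
	 * cast silently drops feature_mask[1] (bits 32..63). */
	unsigned long truncated =
		(unsigned long)((uint64_t)feature_mask[0] |
				((uint64_t)feature_mask[1] << 32));

	/* Fixed approach: keep the full 64-bit value. */
	uint64_t full = (uint64_t)feature_mask[1] << 32 | feature_mask[0];

	printf("truncated (loses high bits if sizeof(long) == 4): %#lx\n",
	       truncated);
	printf("full 64-bit mask: %#llx\n", (unsigned long long)full);
	return 0;
}

The same pattern is applied to navi10 and sienna_cichlid in the hunks below.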
@@ -3581,7 +3581,8 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 	case AMDGPU_PP_SENSOR_GPU_POWER:
 		return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
 	case AMDGPU_PP_SENSOR_VDDGFX:
-		if ((data->vr_config & 0xff) == 0x2)
+		if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
+		    (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
 			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
 					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
 		else
...
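The CIK voltage-reporting fix replaces a magic-number test with a proper mask/shift comparison. A small sketch of why that matters; the shift and plane values below are assumptions for the demo, not the real SMU header definitions:

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout: VDDGFX is not stored in the low byte of vr_config,
 * so (vr_config & 0xff) == 0x2 tests the wrong bits entirely. */
#define VRCONF_VDDGFX_SHIFT 8
#define VRCONF_VDDGFX_MASK  (0xffu << VRCONF_VDDGFX_SHIFT)
#define VR_SVI2_PLANE_2     0x2u

int main(void)
{
	uint32_t vr_config = VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT;

	/* Old test: looks at bits 0..7 and never matches for this layout. */
	printf("magic-number check: %d\n", (vr_config & 0xff) == 0x2);

	/* Fixed test: mask out the VDDGFX field, compare the shifted plane. */
	printf("mask/shift check:   %d\n",
	       (vr_config & VRCONF_VDDGFX_MASK) ==
	       (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT));
	return 0;
}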
@@ -374,8 +374,18 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
 	/* compare them in unit celsius degree */
 	if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
 		low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	/*
+	 * As a common sense, usSoftwareShutdownTemp should be bigger
+	 * than ThotspotLimit. For any invalid usSoftwareShutdownTemp,
+	 * we will just use the max possible setting VEGA10_THERMAL_MAXIMUM_ALERT_TEMP
+	 * to avoid false alarms.
+	 */
+	if ((tdp_table->usSoftwareShutdownTemp >
+	     range->hotspot_crit_max / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)) {
 	if (high > tdp_table->usSoftwareShutdownTemp)
 		high = tdp_table->usSoftwareShutdownTemp;
+	}
 
 	if (low > high)
 		return -EINVAL;
...
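The false-alarm fix only trusts usSoftwareShutdownTemp when it sits above the hotspot critical limit; otherwise the high trip point stays at the maximum alert temperature. A simplified sketch of that policy, with illustrative constants (the VEGA10_THERMAL_MAXIMUM_ALERT_TEMP value and the unit factor are assumptions here, not the real powerplay definitions):

#include <stdio.h>

#define PP_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
#define VEGA10_THERMAL_MAXIMUM_ALERT_TEMP 120 /* degC, assumed */

static int pick_high_trip(int sw_shutdown_temp, int hotspot_crit_max)
{
	/* The trip point defaults to the maximum possible alert temperature. */
	int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP;

	/* Only a shutdown temperature above the hotspot critical limit is
	 * trusted to lower the trip point; a bogus (too small) value would
	 * otherwise cause false thermal alarms. */
	if (sw_shutdown_temp >
	    hotspot_crit_max / PP_TEMPERATURE_UNITS_PER_CENTIGRADES &&
	    high > sw_shutdown_temp)
		high = sw_shutdown_temp;

	return high;
}

int main(void)
{
	/* Sane table: 110 degC shutdown, 105 degC hotspot limit -> clamp. */
	printf("high = %d\n", pick_high_trip(110, 105 * 1000));
	/* Bogus table: 0 degC shutdown -> keep the maximum alert temp. */
	printf("high = %d\n", pick_high_trip(0, 105 * 1000));
	return 0;
}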
@@ -1331,10 +1331,14 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
 {
 	int ret = 0;
 	uint32_t feature_mask[2];
-	unsigned long feature_enabled;
+	uint64_t feature_enabled;
 
 	ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
-	feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
-			   ((uint64_t)feature_mask[1] << 32));
+	if (ret)
+		return false;
+
+	feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
+
 	return !!(feature_enabled & SMC_DPM_FEATURE);
 }
...
@@ -68,7 +68,8 @@
 		FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
 		FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
 		FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
-		FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
+		FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT) | \
+		FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))
 
 #define SMU_11_0_7_GFX_BUSY_THRESHOLD 15
@@ -229,6 +230,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
 	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
 				| FEATURE_MASK(FEATURE_DPM_FCLK_BIT)
+				| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
 				| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
 				| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
 				| FEATURE_MASK(FEATURE_DS_FCLK_BIT)
@@ -1147,10 +1149,14 @@ static bool sienna_cichlid_is_dpm_running(struct smu_context *smu)
 {
 	int ret = 0;
 	uint32_t feature_mask[2];
-	unsigned long feature_enabled;
+	uint64_t feature_enabled;
 
 	ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
-	feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
-			   ((uint64_t)feature_mask[1] << 32));
+	if (ret)
+		return false;
+
+	feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
+
 	return !!(feature_enabled & SMC_DPM_FEATURE);
 }
...
@@ -37,6 +37,7 @@
 #include "cgs_common.h"
 #include "atombios.h"
 #include "pppcielanes.h"
+#include "smu7_smumgr.h"
 
 #include "smu/smu_7_0_1_d.h"
 #include "smu/smu_7_0_1_sh_mask.h"
@@ -2948,6 +2949,7 @@ const struct pp_smumgr_func ci_smu_funcs = {
 	.request_smu_load_specific_fw = NULL,
 	.send_msg_to_smc = ci_send_msg_to_smc,
 	.send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
+	.get_argument = smu7_get_argument,
 	.download_pptable_settings = NULL,
 	.upload_pptable_settings = NULL,
 	.get_offsetof = ci_get_offsetof,
...
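The ci_smu_funcs change matters because readback goes through the ops table; with .get_argument left unset, a caller dereferences a NULL function pointer. A toy ops-table sketch (the struct layout and helper names here are illustrative, not the real pp_smumgr_func interface):

#include <stdint.h>
#include <stdio.h>

struct demo_smu_ops {
	int      (*send_msg)(uint16_t msg);
	uint32_t (*get_argument)(void);
};

static int demo_send_msg(uint16_t msg)
{
	(void)msg;
	return 0;
}

static uint32_t demo_get_argument(void)
{
	return 0x0badcafe; /* pretend this came back from the SMU */
}

static int read_back(const struct demo_smu_ops *ops, uint32_t *value)
{
	int ret = ops->send_msg(0x12);

	if (ret)
		return ret;
	/* A NULL .get_argument here would be an immediate crash. */
	*value = ops->get_argument();
	return 0;
}

int main(void)
{
	const struct demo_smu_ops ci_ops = {
		.send_msg = demo_send_msg,
		.get_argument = demo_get_argument, /* the hook the fix adds */
	};
	uint32_t value;

	if (!read_back(&ci_ops, &value))
		printf("readback: %#x\n", (unsigned)value);
	return 0;
}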
@@ -258,7 +258,7 @@ static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
 static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
 				       enum phy phy)
 {
-	bool ret;
+	bool ret = true;
 	u32 expected_val = 0;
 
 	if (!icl_combo_phy_enabled(dev_priv, phy))
@@ -276,7 +276,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
 			     DCC_MODE_SELECT_CONTINUOSLY);
 	}
 
-	ret = cnl_verify_procmon_ref_values(dev_priv, phy);
+	ret &= cnl_verify_procmon_ref_values(dev_priv, phy);
 
 	if (phy_is_master(dev_priv, phy)) {
 		ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
...
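The icl_combo_phy_verify_state change initializes ret to true and accumulates every check with &=, so no code path can return an uninitialized value (the clang warning being fixed). A minimal sketch of that accumulate-checks pattern with hypothetical check functions:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-register checks standing in for the real PHY helpers. */
static bool check_procmon(void) { return true; }
static bool check_master_regs(void) { return false; }

/* Start from true and AND in every check: conditional blocks can contribute
 * with ret &= ..., and no path returns an uninitialized ret. */
static bool verify_state(bool is_master)
{
	bool ret = true;

	ret &= check_procmon();
	if (is_master)
		ret &= check_master_regs();

	return ret;
}

int main(void)
{
	printf("slave phy:  %s\n", verify_state(false) ? "ok" : "bad");
	printf("master phy: %s\n", verify_state(true) ? "ok" : "bad");
	return 0;
}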
@@ -336,8 +336,10 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 		/* Fill up the empty slots in sha_text and write it out */
 		sha_empty = sizeof(sha_text) - sha_leftovers;
-		for (j = 0; j < sha_empty; j++)
-			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
+		for (j = 0; j < sha_empty; j++) {
+			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
+			sha_text |= ksv[j] << off;
+		}
 
 		ret = intel_write_sha_text(dev_priv, sha_text);
 		if (ret < 0)
@@ -435,7 +437,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 		/* Write 32 bits of text */
 		intel_de_write(dev_priv, HDCP_REP_CTL,
 			       rep_ctl | HDCP_SHA1_TEXT_32);
-		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
+		sha_text |= bstatus[0] << 8 | bstatus[1];
 		ret = intel_write_sha_text(dev_priv, sha_text);
 		if (ret < 0)
 			return ret;
@@ -450,17 +452,29 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 				return ret;
 			sha_idx += sizeof(sha_text);
 		}
+
+		/*
+		 * Terminate the SHA-1 stream by hand. For the other leftover
+		 * cases this is appended by the hardware.
+		 */
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_32);
+		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
+		ret = intel_write_sha_text(dev_priv, sha_text);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
 	} else if (sha_leftovers == 3) {
-		/* Write 32 bits of text */
+		/* Write 32 bits of text (filled from LSB) */
 		intel_de_write(dev_priv, HDCP_REP_CTL,
 			       rep_ctl | HDCP_SHA1_TEXT_32);
-		sha_text |= bstatus[0] << 24;
+		sha_text |= bstatus[0];
 		ret = intel_write_sha_text(dev_priv, sha_text);
 		if (ret < 0)
 			return ret;
 		sha_idx += sizeof(sha_text);
 
-		/* Write 8 bits of text, 24 bits of M0 */
+		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
 		intel_de_write(dev_priv, HDCP_REP_CTL,
 			       rep_ctl | HDCP_SHA1_TEXT_8);
 		ret = intel_write_sha_text(dev_priv, bstatus[1]);
@@ -781,6 +795,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	enum port port = dig_port->base.port;
 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
+	u32 repeater_ctl;
 	int ret;
 
 	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
@@ -796,6 +811,11 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
 		return -ETIMEDOUT;
 	}
 
+	repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
+						   port);
+	intel_de_write(dev_priv, HDCP_REP_CTL,
+		       intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
+
 	ret = hdcp->shim->toggle_signalling(dig_port, false);
 	if (ret) {
 		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
...
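The sha_text population fix shifts each fresh KSV byte down by the number of leftover bytes already occupying the most significant slots of the 32-bit word, instead of overwriting those slots. A standalone sketch of that packing with made-up byte values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* One leftover byte from the previous word already sits in the MSB. */
	uint32_t sha_text = 0xaa000000u;
	unsigned int sha_leftovers = 1;
	uint8_t ksv[] = { 0x11, 0x22, 0x33 };
	unsigned int sha_empty = sizeof(sha_text) - sha_leftovers;
	unsigned int j;

	for (j = 0; j < sha_empty; j++) {
		/* Shift down by sha_leftovers so fresh bytes land below the
		 * leftover bytes instead of clobbering their positions. */
		unsigned int off = (sizeof(sha_text) - j - 1 - sha_leftovers) * 8;

		sha_text |= (uint32_t)ksv[j] << off;
	}

	printf("sha_text = %#010x\n", (unsigned)sha_text); /* 0xaa112233 */
	return 0;
}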
@@ -50,7 +50,10 @@ core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
 				interlock[NV50_DISP_INTERLOCK_OVLY] |
 		  NVDEF(NV507D, UPDATE, NOT_DRIVER_FRIENDLY, FALSE) |
 		  NVDEF(NV507D, UPDATE, NOT_DRIVER_UNFRIENDLY, FALSE) |
-		  NVDEF(NV507D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
+		  NVDEF(NV507D, UPDATE, INHIBIT_INTERRUPTS, FALSE),
+
+				SET_NOTIFIER_CONTROL,
+		  NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
 
 	return PUSH_KICK(push);
 }
...
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
#include "disp.h" #include "disp.h"
#include "head.h" #include "head.h"
#include <nvif/push507c.h> #include <nvif/pushc37b.h>
#include <nvhw/class/clc37d.h> #include <nvhw/class/clc37d.h>
......
@@ -257,6 +257,12 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	dmac->push->end = dmac->push->bgn;
 	dmac->max = 0x1000/4 - 1;
 
+	/* EVO channels are affected by a HW bug where the last 12 DWORDs
+	 * of the push buffer aren't able to be used safely.
+	 */
+	if (disp->oclass < GV100_DISP)
+		dmac->max -= 12;
+
 	args->pushbuf = nvif_handle(&dmac->_push.mem.object);
 
 	ret = nv50_chan_create(device, disp, oclass, head, data, size,
...
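The workaround simply shrinks the usable push buffer on pre-GV100 display classes so the last 12 DWORDs are never touched. A tiny sketch of the size calculation (the buffer size comes from the hunk; everything else is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define PUSHBUF_BYTES 0x1000          /* per-channel push buffer, from the hunk */
#define EVO_UNSAFE_TAIL_DWORDS 12     /* DWORDs the HW bug makes unusable */

static unsigned int usable_dwords(bool pre_gv100_evo)
{
	unsigned int max = PUSHBUF_BYTES / 4 - 1;

	/* Pre-GV100 EVO channels can't safely use the last 12 DWORDs. */
	if (pre_gv100_evo)
		max -= EVO_UNSAFE_TAIL_DWORDS;

	return max;
}

int main(void)
{
	printf("EVO (pre-GV100): %u usable DWORDs\n", usable_dwords(true));
	printf("NVD (GV100+):    %u usable DWORDs\n", usable_dwords(false));
	return 0;
}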
@@ -20,6 +20,6 @@
 	PUSH_ASSERT(!((o) & ~DRF_SMASK(NV507C_DMA_JUMP_OFFSET)), "offset"); \
 	PUSH_DATA__((p), NVDEF(NV507C, DMA, OPCODE, JUMP) |                  \
 			 NVVAL(NV507C, DMA, JUMP_OFFSET, (o) >> 2),          \
-		    "jump 0x%08x - %s", (u32)(o), __func__);                 \
+		    " jump 0x%08x - %s", (u32)(o), __func__);                \
 } while(0)
 #endif
...
@@ -933,7 +933,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
 	/* get matching reference and feedback divider */
 	*ref_div = min(max(den/post_div, 1u), ref_div_max);
-	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
+	*fb_div = max(nom * *ref_div * post_div / den, 1u);
 
 	/* limit fb divider to its maximum */
 	if (*fb_div > fb_div_max) {
...
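The radeon change swaps round-to-nearest for floor division so the PLL code prefers the lower feedback divider. A quick sketch showing where the two differ (the divider values are made up, and DIV_ROUND_CLOSEST here is a simplified unsigned version of the kernel macro):

#include <stdio.h>

/* Simplified unsigned round-to-nearest, for the demo only. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* Made-up divider calculation inputs. */
	unsigned int nom = 1071, den = 100, ref_div = 4, post_div = 7;
	unsigned int n = nom * ref_div * post_div;

	/* Old: round to nearest, which can pick the higher feedback divider. */
	unsigned int fb_nearest = DIV_ROUND_CLOSEST(n, den);
	/* New: floor division (clamped to at least 1) prefers the lower one. */
	unsigned int fb_floor = max_u(n / den, 1u);

	printf("nearest: %u, floor: %u\n", fb_nearest, fb_floor);
	return 0;
}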
@@ -29,6 +29,9 @@
 /* Slave address for the HDCP registers in the receiver */
 #define DRM_HDCP_DDC_ADDR 0x3A
 
+/* Value to use at the end of the SHA-1 bytestream used for repeaters */
+#define DRM_HDCP_SHA1_TERMINATOR 0x80
+
 /* HDCP register offsets for HDMI/DVI devices */
 #define DRM_HDCP_DDC_BKSV 0x00
 #define DRM_HDCP_DDC_RI_PRIME 0x08
...