Commit 8c9b2e32 authored by Dave Airlie

Merge branch 'drm-next-3.14' of git://people.freedesktop.org/~agd5f/linux into drm-next

New tree with the INFO ioctl merge fixed up.  This also adds a couple
of additional minor fixes.

A few more changes for 3.14, mostly just bug fixes.  Note that
"drm/radeon: add query to fetch the max engine clock" will conflict
with 3.13 final, but the fix is pretty obvious.

* 'drm-next-3.14' of git://people.freedesktop.org/~agd5f/linux: (22 commits)
  drm/radeon: add UVD support for OLAND
  drm/radeon: fix minor typos in si_dpm.c
  drm/radeon: set the full cache bit for fences on r7xx+
  drm/radeon: fix surface sync in fence on cayman (v2)
  drm/radeon/dpm: disable mclk switching on desktop RV770
  drm/radeon: fix endian handling in radeon_atom_init_mc_reg_table
  drm/radeon: write gfx pg bases even when gfx pg is disabled
  drm/radeon: bail early from enable ss in certain cases
  drm/radeon: handle ss percentage divider properly
  drm/radeon: add query to fetch the max engine clock (v2)
  drm/radeon/dp: sleep after powering up the display
  drm/radeon/dp: use usleep_range rather than udelay
  drm/radeon/dp: bump i2c-over-aux retries to 7
  drm/radeon: disable ss on DP for DCE3.x
  drm/radeon/cik: use hw defaults for TC_CFG registers
  drm/radeon: disable dpm on BTC
  drm/radeon/cik: use WAIT_REG_MEM special op for CP HDP flush
  drm/radeon/cik: use POLL_REG_MEM special op for sDMA HDP flush
  drm/radeon: consolidate sdma hdp flushing code for CIK
  drm/radeon: consolidate cp hdp flushing code for CIK
  ...
parents cfd72a4c 5d029339
@@ -423,7 +423,17 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
     int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
     union atom_enable_ss args;
 
-    if (!enable) {
+    if (enable) {
+        /* Don't mess with SS if percentage is 0 or external ss.
+         * SS is already disabled previously, and disabling it
+         * again can cause display problems if the pll is already
+         * programmed.
+         */
+        if (ss->percentage == 0)
+            return;
+        if (ss->type & ATOM_EXTERNAL_SS_MASK)
+            return;
+    } else {
         for (i = 0; i < rdev->num_crtc; i++) {
             if (rdev->mode_info.crtcs[i] &&
                 rdev->mode_info.crtcs[i]->enabled &&
@@ -459,8 +469,6 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
         args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
         args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
         args.v3.ucEnable = enable;
-        if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
-            args.v3.ucEnable = ATOM_DISABLE;
     } else if (ASIC_IS_DCE4(rdev)) {
         args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
         args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
@@ -480,8 +488,6 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
         args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
         args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
         args.v2.ucEnable = enable;
-        if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
-            args.v2.ucEnable = ATOM_DISABLE;
     } else if (ASIC_IS_DCE3(rdev)) {
         args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
         args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
@@ -503,8 +509,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
         args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
         args.lvds_ss_2.ucEnable = enable;
     } else {
-        if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
-            (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+        if (enable == ATOM_DISABLE) {
             atombios_disable_ss(rdev, pll_id);
             return;
         }
@@ -938,12 +943,15 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
                     radeon_atombios_get_ppll_ss_info(rdev,
                                      &radeon_crtc->ss,
                                      ATOM_DP_SS_ID1);
-            } else
+            } else {
                 radeon_crtc->ss_enabled =
                     radeon_atombios_get_ppll_ss_info(rdev,
                                      &radeon_crtc->ss,
                                      ATOM_DP_SS_ID1);
+            }
+            /* disable spread spectrum on DCE3 DP */
+            radeon_crtc->ss_enabled = false;
         }
         break;
     case ATOM_ENCODER_MODE_LVDS:
         if (ASIC_IS_DCE4(rdev))
@@ -1039,15 +1047,17 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
         /* calculate ss amount and step size */
         if (ASIC_IS_DCE4(rdev)) {
             u32 step_size;
-            u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
+            u32 amount = (((fb_div * 10) + frac_fb_div) *
+                          (u32)radeon_crtc->ss.percentage) /
+                (100 * (u32)radeon_crtc->ss.percentage_divider);
             radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
             radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
                 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
             if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
-                step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+                step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
                     (125 * 25 * pll->reference_freq / 100);
             else
-                step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+                step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
                     (125 * 25 * pll->reference_freq / 100);
             radeon_crtc->ss.step = step_size;
         }
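Note on the hunk above: SS_MODE_V3 tables may give the spread-spectrum percentage in 0.001% units rather than 0.01%, so the divisor can no longer be hard-coded to 10000. A minimal standalone sketch of the new arithmetic (hypothetical helper name, not part of the patch):

```c
#include <stdint.h>

/* With percentage_divider == 100 (percentage in 0.01% units) the
 * divisor reduces to the old hard-coded 10000; tables flagged with
 * SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK use 1000 instead. */
static uint32_t ss_amount(uint32_t fb_div, uint32_t frac_fb_div,
                          uint16_t percentage, uint16_t percentage_divider)
{
    return (((fb_div * 10) + frac_fb_div) * (uint32_t)percentage) /
           (100 * (uint32_t)percentage_divider);
}
```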
...
@@ -161,7 +161,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
     msg[3] = (msg_bytes << 4) | (send_bytes - 1);
     memcpy(&msg[4], send, send_bytes);
 
-    for (retry = 0; retry < 4; retry++) {
+    for (retry = 0; retry < 7; retry++) {
         ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
                                     msg, msg_bytes, NULL, 0, delay, &ack);
         if (ret == -EBUSY)
@@ -172,7 +172,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
         if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
             return send_bytes;
         else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
-            udelay(400);
+            usleep_range(400, 500);
         else
             return -EIO;
     }
@@ -195,7 +195,7 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
     msg[2] = DP_AUX_NATIVE_READ << 4;
     msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
 
-    for (retry = 0; retry < 4; retry++) {
+    for (retry = 0; retry < 7; retry++) {
         ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
                                     msg, msg_bytes, recv, recv_bytes, delay, &ack);
         if (ret == -EBUSY)
@@ -206,7 +206,7 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
         if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
             return ret;
         else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
-            udelay(400);
+            usleep_range(400, 500);
         else if (ret == 0)
             return -EPROTO;
         else
@@ -274,7 +274,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
         break;
     }
 
-    for (retry = 0; retry < 4; retry++) {
+    for (retry = 0; retry < 7; retry++) {
         ret = radeon_process_aux_ch(auxch,
                                     msg, msg_bytes, reply, reply_bytes, 0, &ack);
         if (ret == -EBUSY)
@@ -295,7 +295,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
             return -EREMOTEIO;
         case DP_AUX_NATIVE_REPLY_DEFER:
             DRM_DEBUG_KMS("aux_ch native defer\n");
-            udelay(400);
+            usleep_range(500, 600);
             continue;
         default:
             DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
@@ -312,7 +312,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
             return -EREMOTEIO;
         case DP_AUX_I2C_REPLY_DEFER:
             DRM_DEBUG_KMS("aux_i2c defer\n");
-            udelay(400);
+            usleep_range(400, 500);
             break;
         default:
             DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
@@ -673,9 +673,11 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
     u8 tmp;
 
     /* power up the sink */
-    if (dp_info->dpcd[0] >= 0x11)
+    if (dp_info->dpcd[0] >= 0x11) {
         radeon_write_dpcd_reg(dp_info->radeon_connector,
                               DP_SET_POWER, DP_SET_POWER_D0);
+        usleep_range(1000, 2000);
+    }
 
     /* possibly enable downspread on the sink */
     if (dp_info->dpcd[3] & 0x1)
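A note on the udelay() to usleep_range() conversions above: udelay() busy-waits for the whole interval, while usleep_range() sleeps the task, which is the preferred approach for waits of hundreds of microseconds in process context (which these AUX paths are). Illustrative comparison only:

```c
#include <linux/delay.h>

/* Busy-wait: the CPU spins for the full 400 us. */
udelay(400);

/* Sleep: the task yields and is woken between 400 and 500 us later;
 * the range lets the timer subsystem coalesce nearby wakeups.
 * Must not be called from atomic context. */
usleep_range(400, 500);
```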
...
@@ -3486,6 +3486,51 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
     return r;
 }
 
+/**
+ * cik_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emits an hdp flush on the cp.
+ */
+static void cik_hdp_flush_cp_ring_emit(struct radeon_device *rdev,
+                                       int ridx)
+{
+    struct radeon_ring *ring = &rdev->ring[ridx];
+    u32 ref_and_mask;
+
+    switch (ring->idx) {
+    case CAYMAN_RING_TYPE_CP1_INDEX:
+    case CAYMAN_RING_TYPE_CP2_INDEX:
+    default:
+        switch (ring->me) {
+        case 0:
+            ref_and_mask = CP2 << ring->pipe;
+            break;
+        case 1:
+            ref_and_mask = CP6 << ring->pipe;
+            break;
+        default:
+            return;
+        }
+        break;
+    case RADEON_RING_TYPE_GFX_INDEX:
+        ref_and_mask = CP0;
+        break;
+    }
+
+    radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+    radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
+                             WAIT_REG_MEM_FUNCTION(3) |  /* == */
+                             WAIT_REG_MEM_ENGINE(1)));   /* pfp */
+    radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
+    radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
+    radeon_ring_write(ring, ref_and_mask);
+    radeon_ring_write(ring, ref_and_mask);
+    radeon_ring_write(ring, 0x20); /* poll interval */
+}
+
 /**
  * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
  *
@@ -3512,15 +3557,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
     radeon_ring_write(ring, fence->seq);
     radeon_ring_write(ring, 0);
     /* HDP flush */
-    /* We should be using the new WAIT_REG_MEM special op packet here
-     * but it causes the CP to hang
-     */
-    radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-    radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                             WRITE_DATA_DST_SEL(0)));
-    radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-    radeon_ring_write(ring, 0);
-    radeon_ring_write(ring, 0);
+    cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
 }
 
 /**
@@ -3550,15 +3587,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
     radeon_ring_write(ring, fence->seq);
     radeon_ring_write(ring, 0);
     /* HDP flush */
-    /* We should be using the new WAIT_REG_MEM special op packet here
-     * but it causes the CP to hang
-     */
-    radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-    radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                             WRITE_DATA_DST_SEL(0)));
-    radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-    radeon_ring_write(ring, 0);
-    radeon_ring_write(ring, 0);
+    cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
 }
 
 bool cik_semaphore_ring_emit(struct radeon_device *rdev,
@@ -3566,8 +3595,6 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
                              struct radeon_semaphore *semaphore,
                              bool emit_wait)
 {
-/* TODO: figure out why semaphore cause lockups */
-#if 0
     uint64_t addr = semaphore->gpu_addr;
     unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
@@ -3576,9 +3603,6 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
     radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 
     return true;
-#else
-    return false;
-#endif
 }
 
 /**
@@ -5329,20 +5353,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
            WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
            WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
-    /* TC cache setup ??? */
-    WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
-    WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
-    WREG32(TC_CFG_L1_STORE_POLICY, 0);
-
-    WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
-    WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
-    WREG32(TC_CFG_L2_STORE_POLICY0, 0);
-    WREG32(TC_CFG_L2_STORE_POLICY1, 0);
-    WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);
-
-    WREG32(TC_CFG_L1_VOLATILE, 0);
-    WREG32(TC_CFG_L2_VOLATILE, 0);
-
     if (rdev->family == CHIP_KAVERI) {
         u32 tmp = RREG32(CHUB_CONTROL);
         tmp &= ~BYPASS_VM;
@@ -5558,16 +5568,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
     radeon_ring_write(ring, VMID(0));
 
     /* HDP flush */
-    /* We should be using the WAIT_REG_MEM packet here like in
-     * cik_fence_ring_emit(), but it causes the CP to hang in this
-     * context...
-     */
-    radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-    radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                             WRITE_DATA_DST_SEL(0)));
-    radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-    radeon_ring_write(ring, 0);
-    radeon_ring_write(ring, 0);
+    cik_hdp_flush_cp_ring_emit(rdev, ridx);
 
     /* bits 0-15 are the VM contexts0-15 */
     radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
...
@@ -156,6 +156,35 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
 }
 
+/**
+ * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
+                                         int ridx)
+{
+    struct radeon_ring *ring = &rdev->ring[ridx];
+    u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+                      SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+    u32 ref_and_mask;
+
+    if (ridx == R600_RING_TYPE_DMA_INDEX)
+        ref_and_mask = SDMA0;
+    else
+        ref_and_mask = SDMA1;
+
+    radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+    radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+    radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+    radeon_ring_write(ring, ref_and_mask); /* reference */
+    radeon_ring_write(ring, ref_and_mask); /* mask */
+    radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
+}
+
 /**
  * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
  *
@@ -180,12 +209,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
     /* generate an interrupt */
     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
     /* flush HDP */
-    /* We should be using the new POLL_REG_MEM special op packet here
-     * but it causes sDMA to hang sometimes
-     */
-    radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-    radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-    radeon_ring_write(ring, 0);
+    cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
 }
 
 /**
@@ -816,12 +840,7 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
     radeon_ring_write(ring, VMID(0));
 
     /* flush HDP */
-    /* We should be using the new POLL_REG_MEM special op packet here
-     * but it causes sDMA to hang sometimes
-     */
-    radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-    radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-    radeon_ring_write(ring, 0);
+    cik_sdma_hdp_flush_ring_emit(rdev, ridx);
 
     /* flush TLB */
     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
...
@@ -1331,13 +1331,12 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
 {
     struct radeon_ring *ring = &rdev->ring[fence->ring];
     u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+    u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+        PACKET3_SH_ACTION_ENA;
 
     /* flush read cache over gart for this vmid */
-    radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-    radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-    radeon_ring_write(ring, 0);
     radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-    radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+    radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
     radeon_ring_write(ring, 0xFFFFFFFF);
     radeon_ring_write(ring, 0);
     radeon_ring_write(ring, 10); /* poll interval */
@@ -1353,6 +1352,8 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
     struct radeon_ring *ring = &rdev->ring[ib->ring];
+    u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+        PACKET3_SH_ACTION_ENA;
 
     /* set to DX10/11 mode */
     radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -1377,14 +1378,11 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
               (ib->vm ? (ib->vm->id << 24) : 0));
 
     /* flush read cache over gart for this vmid */
-    radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-    radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-    radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
     radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-    radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+    radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
     radeon_ring_write(ring, 0xFFFFFFFF);
     radeon_ring_write(ring, 0);
-    radeon_ring_write(ring, 10); /* poll interval */
+    radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
 }
 
 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
...
@@ -1154,6 +1154,7 @@
 # define PACKET3_DB_ACTION_ENA (1 << 26)
 # define PACKET3_SH_ACTION_ENA (1 << 27)
 # define PACKET3_SX_ACTION_ENA (1 << 28)
+# define PACKET3_ENGINE_ME     (1 << 31)
 #define PACKET3_ME_INITIALIZE 0x44
 #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
 #define PACKET3_COND_WRITE 0x45
...
@@ -2706,14 +2706,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
                           struct radeon_fence *fence)
 {
     struct radeon_ring *ring = &rdev->ring[fence->ring];
+    u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+        PACKET3_SH_ACTION_ENA;
+
+    if (rdev->family >= CHIP_RV770)
+        cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
 
     if (rdev->wb.use_event) {
         u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
         /* flush read cache over gart */
         radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-        radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
-                          PACKET3_VC_ACTION_ENA |
-                          PACKET3_SH_ACTION_ENA);
+        radeon_ring_write(ring, cp_coher_cntl);
         radeon_ring_write(ring, 0xFFFFFFFF);
         radeon_ring_write(ring, 0);
         radeon_ring_write(ring, 10); /* poll interval */
@@ -2727,9 +2730,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
     } else {
         /* flush read cache over gart */
         radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-        radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
-                          PACKET3_VC_ACTION_ENA |
-                          PACKET3_SH_ACTION_ENA);
+        radeon_ring_write(ring, cp_coher_cntl);
         radeon_ring_write(ring, 0xFFFFFFFF);
         radeon_ring_write(ring, 0);
         radeon_ring_write(ring, 10); /* poll interval */
...
@@ -1582,6 +1582,7 @@
 # define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
 #define PACKET3_SURFACE_SYNC 0x43
 # define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_FULL_CACHE_ENA    (1 << 20) /* r7xx+ only */
 # define PACKET3_TC_ACTION_ENA     (1 << 23)
 # define PACKET3_VC_ACTION_ENA     (1 << 24)
 # define PACKET3_CB_ACTION_ENA     (1 << 25)
...
@@ -1511,6 +1511,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                     le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
                 ss->type = ss_assign->v1.ucSpreadSpectrumMode;
                 ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
+                ss->percentage_divider = 100;
                 return true;
             }
             ss_assign = (union asic_ss_assignment *)
@@ -1528,6 +1529,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                     le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
                 ss->type = ss_assign->v2.ucSpreadSpectrumMode;
                 ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
+                ss->percentage_divider = 100;
                 if ((crev == 2) &&
                     ((id == ASIC_INTERNAL_ENGINE_SS) ||
                      (id == ASIC_INTERNAL_MEMORY_SS)))
@@ -1549,6 +1551,11 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                     le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
                 ss->type = ss_assign->v3.ucSpreadSpectrumMode;
                 ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
+                if (ss_assign->v3.ucSpreadSpectrumMode &
+                    SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
+                    ss->percentage_divider = 1000;
+                else
+                    ss->percentage_divider = 100;
                 if ((id == ASIC_INTERNAL_ENGINE_SS) ||
                     (id == ASIC_INTERNAL_MEMORY_SS))
                     ss->rate /= 100;
@@ -3867,16 +3874,18 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
                     ((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
             }
             reg_table->last = i;
-            while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) &&
+            while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
                    (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
-                t_mem_id = (u8)((*(u32 *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);
+                t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
+                                >> MEM_ID_SHIFT);
                 if (module_index == t_mem_id) {
                     reg_table->mc_reg_table_entry[num_ranges].mclk_max =
-                        (u32)((*(u32 *)reg_data & CLOCK_RANGE_MASK) >> CLOCK_RANGE_SHIFT);
+                        (u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
+                              >> CLOCK_RANGE_SHIFT);
                     for (i = 0, j = 1; i < reg_table->last; i++) {
                         if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
                             reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
-                                (u32)*((u32 *)reg_data + j);
+                                (u32)le32_to_cpu(*((u32 *)reg_data + j));
                             j++;
                         } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
                             reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *) reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize)); ((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
} }
if (*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
return -EINVAL; return -EINVAL;
reg_table->num_entries = num_ranges; reg_table->num_entries = num_ranges;
} else } else
......
@@ -470,6 +470,13 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
             DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
         }
         break;
+    case RADEON_INFO_MAX_SCLK:
+        if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+            rdev->pm.dpm_enabled)
+            *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
+        else
+            *value = rdev->pm.default_sclk * 10;
+        break;
     default:
         DRM_DEBUG_KMS("Invalid request %d\n", info->request);
         return -EINVAL;
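The new request is meant for userspace (e.g. an OpenCL runtime) to discover the maximum engine clock. A hypothetical libdrm-based caller might look like this (a sketch assuming an already-open DRM fd; the helper name is made up):

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* Returns 0 on success and fills *sclk with the max engine clock in kHz
 * (the kernel stores clocks in 10 kHz units and multiplies by 10 here). */
static int radeon_query_max_sclk(int fd, uint32_t *sclk)
{
    struct drm_radeon_info info;
    uint32_t value = 0;
    int ret;

    memset(&info, 0, sizeof(info));
    info.request = RADEON_INFO_MAX_SCLK;  /* 0x1a, added above */
    info.value = (uintptr_t)&value;       /* kernel copies the result here */

    ret = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
    if (ret == 0)
        *sclk = value;
    return ret;
}
```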
...
@@ -291,6 +291,7 @@ struct radeon_tv_regs {
 struct radeon_atom_ss {
     uint16_t percentage;
+    uint16_t percentage_divider;
     uint8_t type;
     uint16_t step;
     uint8_t delay;
...
@@ -924,6 +924,10 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
     if (rdev->asic->dpm.powergate_uvd) {
         mutex_lock(&rdev->pm.mutex);
+        /* don't powergate anything if we
+           have active but paused streams */
+        enable |= rdev->pm.dpm.sd > 0;
+        enable |= rdev->pm.dpm.hd > 0;
         /* enable/disable UVD */
         radeon_dpm_powergate_uvd(rdev, !enable);
         mutex_unlock(&rdev->pm.mutex);
@@ -1231,6 +1235,9 @@ int radeon_pm_init(struct radeon_device *rdev)
     case CHIP_RV670:
     case CHIP_RS780:
     case CHIP_RS880:
+    case CHIP_BARTS:
+    case CHIP_TURKS:
+    case CHIP_CAICOS:
     case CHIP_CAYMAN:
         /* DPM requires the RLC, RV770+ dGPU requires SMC */
         if (!rdev->rlc_fw)
@@ -1256,9 +1263,6 @@ int radeon_pm_init(struct radeon_device *rdev)
     case CHIP_PALM:
     case CHIP_SUMO:
     case CHIP_SUMO2:
-    case CHIP_BARTS:
-    case CHIP_TURKS:
-    case CHIP_CAICOS:
     case CHIP_ARUBA:
     case CHIP_TAHITI:
     case CHIP_PITCAIRN:
...
@@ -91,6 +91,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
     case CHIP_VERDE:
     case CHIP_PITCAIRN:
     case CHIP_ARUBA:
+    case CHIP_OLAND:
         fw_name = FIRMWARE_TAHITI;
         break;
@@ -778,6 +779,8 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
     if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
         if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+            radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
+                                     &rdev->pm.dpm.hd);
             radeon_dpm_enable_uvd(rdev, false);
         } else {
             radeon_set_uvd_clocks(rdev, 0, 0);
...
@@ -2251,7 +2251,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
         pl->vddci = vddci;
     }
 
-    if (rdev->family >= CHIP_BARTS) {
     if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
         ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
@@ -2259,7 +2258,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
     }
-    }
 }
 
 int rv7xx_parse_power_table(struct radeon_device *rdev)
@@ -2538,6 +2536,12 @@ bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
         (rdev->pdev->subsystem_device == 0x1c42))
         switch_limit = 200;
 
+    /* RV770 */
+    /* mclk switching doesn't seem to work reliably on desktop RV770s */
+    if ((rdev->family == CHIP_RV770) &&
+        !(rdev->flags & RADEON_IS_MOBILITY))
+        switch_limit = 0xffffffff; /* disable mclk switching */
+
     if (vblank_time < switch_limit)
         return true;
     else
...
@@ -5488,6 +5488,9 @@ static void si_init_pg(struct radeon_device *rdev)
         si_init_ao_cu_mask(rdev);
         if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
             si_init_gfx_cgpg(rdev);
+        } else {
+            WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+            WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
         }
         si_enable_dma_pg(rdev, true);
         si_enable_gfx_cgpg(rdev, true);
...
@@ -2395,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
     if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
         enable_sq_ramping = false;
 
-    if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+    if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
         enable_sq_ramping = false;
 
     for (i = 0; i < state->performance_level_count; i++) {
@@ -5413,7 +5413,7 @@ static void si_populate_mc_reg_addresses(struct radeon_device *rdev,
     for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
         if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
-            if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+            if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
                 break;
             mc_reg_table->address[i].s0 =
                 cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
...
@@ -153,6 +153,7 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
         chip_id = 0x01000015;
         break;
     case CHIP_PITCAIRN:
+    case CHIP_OLAND:
         chip_id = 0x01000016;
         break;
     case CHIP_ARUBA:
...
@@ -985,6 +985,8 @@ struct drm_radeon_cs {
 #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
 /* query the number of render backends */
 #define RADEON_INFO_SI_BACKEND_ENABLED_MASK  0x19
+/* max engine clock - needed for OpenCL */
+#define RADEON_INFO_MAX_SCLK                 0x1a
 
 struct drm_radeon_info {
...