Commit 162b20d2 authored by Dave Airlie

Merge branch 'drm-next-4.8' of git://people.freedesktop.org/~agd5f/linux into drm-next

A few more patches for 4.8.  Mostly bug fixes and some prep work
for iceland powerplay support.  I have a couple of polaris patches and
Edward's misc cleanups that require a merge with Linus' tree; I don't
know if you are planning a merge anytime soon.

[airlied: fixed up endian vs 32-bit change in ppatomctrl]

* 'drm-next-4.8' of git://people.freedesktop.org/~agd5f/linux: (26 commits)
  drm/amdgpu: comment out unused defaults_bonaire_pro static const structures to fix the build
  drm/amdgpu: temporary comment out unused static const structures to fix the build
  drm/amdgpu: S3 resume fail on Polaris10
  drm/amd/powerplay: add pp_tables_get_response_times function in process pptables
  drm/amd/powerplay: fix the incorrect return value
  drm/amd/powerplay: add atomctrl_get_voltage_evv function in ppatomctrl
  drm/amdgpu: add new definitions into ppsmc.h for iceland
  drm/amd/powerplay: add SMU register macro for future use
  drm/amdgpu: add ucode_start_address into cgs_firmware_info
  drm/amdgpu: no need load microcode at sdma if powerplay is enabled
  drm/amdgpu: rename smumgr to smum for dpm
  drm/amdgpu: disable GFX PG on CZ/BR/ST
  drivers: gpu: drm: amd: powerplay: hwmgr: Remove unused variable
  drm/amdgpu: return -ENOSPC when running out of UVD handles
  drm/amdgpu: trace need_flush in grab_vm as well
  drm/amdgpu: always signal all fences
  drm/amdgpu: check flush fence context instead of same ring v2
  drm/radeon: support backlight control for UNIPHY3
  drm/amdgpu: support backlight control for UNIPHY3
  drm/amdgpu: remove usec timeout loop from IB tests
  ...
parents c3f8d864 5ef82929
@@ -752,6 +752,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 	if (!adev->pm.fw) {
 		switch (adev->asic_type) {
+		case CHIP_TOPAZ:
+			strcpy(fw_name, "amdgpu/topaz_smc.bin");
+			break;
 		case CHIP_TONGA:
 			strcpy(fw_name, "amdgpu/tonga_smc.bin");
 			break;
@@ -800,6 +803,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		info->version = adev->pm.fw_version;
 		info->image_size = ucode_size;
+		info->ucode_start_address = ucode_start_address;
 		info->kptr = (void *)src;
 	}
 	return 0;
...
@@ -204,16 +204,25 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 	if (seq != ring->fence_drv.sync_seq)
 		amdgpu_fence_schedule_fallback(ring);
 
-	while (last_seq != seq) {
+	if (unlikely(seq == last_seq))
+		return;
+
+	last_seq &= drv->num_fences_mask;
+	seq &= drv->num_fences_mask;
+
+	do {
 		struct fence *fence, **ptr;
 
-		ptr = &drv->fences[++last_seq & drv->num_fences_mask];
+		++last_seq;
+		last_seq &= drv->num_fences_mask;
+		ptr = &drv->fences[last_seq];
 
 		/* There is always exactly one thread signaling this fence slot */
 		fence = rcu_dereference_protected(*ptr, 1);
 		RCU_INIT_POINTER(*ptr, NULL);
 
-		BUG_ON(!fence);
+		if (!fence)
+			continue;
 
 		r = fence_signal(fence);
 		if (!r)
@@ -222,7 +231,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 			BUG();
 
 		fence_put(fence);
-	}
+	} while (last_seq != seq);
 }
 
 /**
...
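The reworked loop above walks every slot between the last processed sequence number and the current one, relying on num_fences_mask being a power-of-two ring size minus one, and skips slots that were already consumed instead of asserting. A minimal userspace sketch of the same masked walk (slot count and contents are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

#define NUM_FENCES 8                   /* must be a power of two */
#define FENCE_MASK (NUM_FENCES - 1)

static const char *slots[NUM_FENCES]; /* stand-in for drv->fences[] */

/* Signal every slot in (last_seq, seq], skipping already-empty ones. */
static void process(uint32_t last_seq, uint32_t seq)
{
    if (seq == last_seq)
        return;

    last_seq &= FENCE_MASK;
    seq &= FENCE_MASK;

    do {
        last_seq = (last_seq + 1) & FENCE_MASK;
        if (!slots[last_seq])
            continue;                  /* slot already consumed */
        printf("signaling fence in slot %u (%s)\n", last_seq, slots[last_seq]);
        slots[last_seq] = NULL;
    } while (last_seq != seq);
}

int main(void)
{
    slots[1] = "A"; slots[2] = "B"; slots[3] = "C";
    process(0, 3);                     /* signals A, B and C in order */
    return 0;
}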
@@ -122,7 +122,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	bool skip_preamble, need_ctx_switch;
 	unsigned patch_offset = ~0;
 	struct amdgpu_vm *vm;
-	struct fence *hwf;
 	uint64_t ctx;
 	unsigned i;
@@ -190,7 +189,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (ring->funcs->emit_hdp_invalidate)
 		amdgpu_ring_emit_hdp_invalidate(ring);
 
-	r = amdgpu_fence_emit(ring, &hwf);
+	r = amdgpu_fence_emit(ring, f);
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
 		if (job && job->vm_id)
@@ -205,9 +204,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 			AMDGPU_FENCE_FLAG_64BIT);
 	}
 
-	if (f)
-		*f = fence_get(hwf);
-
 	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
...
@@ -172,15 +172,13 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 	trace_amdgpu_sched_run_job(job);
 	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
 			       job->sync.last_vm_update, job, &fence);
-	if (r) {
+	if (r)
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
-		goto err;
-	}
 
-err:
 	/* if gpu reset, hw fence will be replaced here */
 	fence_put(job->fence);
-	job->fence = fence;
+	job->fence = fence_get(fence);
+	amdgpu_job_free_resources(job);
 	return fence;
 }
...
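The extra reference taken with fence_get() before storing the fence in job->fence matters because two owners now hold it: the job (until it is freed or the fence is replaced after a GPU reset) and the scheduler, which consumes the reference returned from amdgpu_job_run(). A hedged sketch of the general get/put discipline, with a simplified refcount rather than the kernel's struct fence:

#include <stdio.h>

struct fence { int refcount; };

static struct fence *fence_get(struct fence *f) { if (f) f->refcount++; return f; }
static void fence_put(struct fence *f)
{
    if (f && --f->refcount == 0)
        printf("fence freed\n");
}

int main(void)
{
    struct fence hw = { .refcount = 1 };      /* reference returned by the emitter */
    struct fence *job_fence = fence_get(&hw); /* job keeps its own reference */
    fence_put(&hw);                           /* scheduler consumes the returned one */
    fence_put(job_fence);                     /* job teardown drops the last one */
    return 0;
}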
@@ -149,24 +149,26 @@ TRACE_EVENT(amdgpu_sched_run_job,
 
 TRACE_EVENT(amdgpu_vm_grab_id,
-	    TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
-		     uint64_t pd_addr),
-	    TP_ARGS(vm, ring, vmid, pd_addr),
+	    TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
+	    TP_ARGS(vm, ring, job),
 
 	    TP_STRUCT__entry(
 			     __field(struct amdgpu_vm *, vm)
 			     __field(u32, ring)
 			     __field(u32, vmid)
 			     __field(u64, pd_addr)
+			     __field(u32, needs_flush)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->vm = vm;
 			   __entry->ring = ring;
-			   __entry->vmid = vmid;
-			   __entry->pd_addr = pd_addr;
+			   __entry->vmid = job->vm_id;
+			   __entry->pd_addr = job->vm_pd_addr;
+			   __entry->needs_flush = job->vm_needs_flush;
 			   ),
-	    TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
-		      __entry->ring, __entry->vmid, __entry->pd_addr)
+	    TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
+		      __entry->vm, __entry->ring, __entry->vmid,
+		      __entry->pd_addr, __entry->needs_flush)
 );
 
 TRACE_EVENT(amdgpu_vm_bo_map,
...
@@ -40,7 +40,7 @@
 #include "uvd/uvd_4_2_d.h"
 
 /* 1 second timeout */
-#define UVD_IDLE_TIMEOUT_MS	1000
+#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)
 
 /* Polaris10/11 firmware version */
 #define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
@@ -662,7 +662,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 		}
 
 		DRM_ERROR("No more free UVD handles!\n");
-		return -EINVAL;
+		return -ENOSPC;
 
 	case 1:
 		/* it's a decode msg, calc buffer sizes */
@@ -968,7 +968,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (direct) {
 		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-		job->fence = f;
+		job->fence = fence_get(f);
 		if (r)
 			goto err_free;
@@ -1114,8 +1114,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
 		}
 	} else {
-		schedule_delayed_work(&adev->uvd.idle_work,
-				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 	}
 }
@@ -1123,7 +1122,7 @@ static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
 {
 	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
 	set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
-					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+					    UVD_IDLE_TIMEOUT);
 	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
...
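Folding msecs_to_jiffies() into the timeout definition means every scheduling site now passes a value that is already in jiffies, so no caller can forget the conversion. A sketch of the before/after shape (illustrative, outside the driver):

/* Before: every caller had to convert at the call site. */
#define UVD_IDLE_TIMEOUT_MS	1000
/* schedule_delayed_work(&work, msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); */

/* After: the conversion lives in the definition itself. */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)
/* schedule_delayed_work(&work, UVD_IDLE_TIMEOUT); */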
@@ -36,7 +36,7 @@
 #include "cikd.h"
 
 /* 1 second timeout */
-#define VCE_IDLE_TIMEOUT_MS	1000
+#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)
 
 /* Firmware Names */
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -310,8 +310,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
 			amdgpu_asic_set_vce_clocks(adev, 0, 0);
 		}
 	} else {
-		schedule_delayed_work(&adev->vce.idle_work,
-				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
+		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
 	}
 }
@@ -324,17 +323,12 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
  */
 static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
 {
-	bool streams_changed = false;
 	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
-	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
-					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
 
-	if (adev->pm.dpm_enabled) {
-		/* XXX figure out if the streams changed */
-		streams_changed = false;
-	}
+	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
+					    VCE_IDLE_TIMEOUT);
 
-	if (set_clocks || streams_changed) {
+	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_vce(adev, true);
 		} else {
@@ -357,6 +351,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	int i, r;
 	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
 		uint32_t handle = atomic_read(&adev->vce.handles[i]);
+
 		if (!handle || adev->vce.filp[i] != filp)
 			continue;
@@ -437,7 +432,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 		ib->ptr[i] = 0x0;
 
 	r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-	job->fence = f;
+	job->fence = fence_get(f);
 	if (r)
 		goto err;
@@ -499,7 +494,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	if (direct) {
 		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-		job->fence = f;
+		job->fence = fence_get(f);
 		if (r)
 			goto err;
@@ -580,12 +575,10 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
  * we we don't have another free session index.
  */
 static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
-				      uint32_t handle, bool *allocated)
+				      uint32_t handle, uint32_t *allocated)
 {
 	unsigned i;
 
-	*allocated = false;
-
 	/* validate the handle */
 	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
 		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
@@ -602,7 +595,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
 		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
 			p->adev->vce.filp[i] = p->filp;
 			p->adev->vce.img_size[i] = 0;
-			*allocated = true;
+			*allocated |= 1 << i;
 			return i;
 		}
 	}
@@ -622,9 +615,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
 	unsigned fb_idx = 0, bs_idx = 0;
 	int session_idx = -1;
-	bool destroyed = false;
-	bool created = false;
-	bool allocated = false;
+	uint32_t destroyed = 0;
+	uint32_t created = 0;
+	uint32_t allocated = 0;
 	uint32_t tmp, handle = 0;
 	uint32_t *size = &tmp;
 	int i, r = 0, idx = 0;
@@ -641,30 +634,30 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 			goto out;
 		}
 
-		if (destroyed) {
-			DRM_ERROR("No other command allowed after destroy!\n");
-			r = -EINVAL;
-			goto out;
-		}
-
 		switch (cmd) {
-		case 0x00000001: // session
+		case 0x00000001: /* session */
 			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
 			session_idx = amdgpu_vce_validate_handle(p, handle,
 								 &allocated);
-			if (session_idx < 0)
-				return session_idx;
+			if (session_idx < 0) {
+				r = session_idx;
+				goto out;
+			}
 			size = &p->adev->vce.img_size[session_idx];
 			break;
 
-		case 0x00000002: // task info
+		case 0x00000002: /* task info */
 			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
 			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
 			break;
 
-		case 0x01000001: // create
-			created = true;
-			if (!allocated) {
+		case 0x01000001: /* create */
+			created |= 1 << session_idx;
+			if (destroyed & (1 << session_idx)) {
+				destroyed &= ~(1 << session_idx);
+				allocated |= 1 << session_idx;
+			} else if (!(allocated & (1 << session_idx))) {
 				DRM_ERROR("Handle already in use!\n");
 				r = -EINVAL;
 				goto out;
@@ -675,16 +668,16 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 				8 * 3 / 2;
 			break;
 
-		case 0x04000001: // config extension
-		case 0x04000002: // pic control
-		case 0x04000005: // rate control
-		case 0x04000007: // motion estimation
-		case 0x04000008: // rdo
-		case 0x04000009: // vui
-		case 0x05000002: // auxiliary buffer
+		case 0x04000001: /* config extension */
+		case 0x04000002: /* pic control */
+		case 0x04000005: /* rate control */
+		case 0x04000007: /* motion estimation */
+		case 0x04000008: /* rdo */
+		case 0x04000009: /* vui */
+		case 0x05000002: /* auxiliary buffer */
 			break;
 
-		case 0x03000001: // encode
+		case 0x03000001: /* encode */
 			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
 						*size, 0);
 			if (r)
@@ -696,18 +689,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 				goto out;
 			break;
 
-		case 0x02000001: // destroy
-			destroyed = true;
+		case 0x02000001: /* destroy */
+			destroyed |= 1 << session_idx;
 			break;
 
-		case 0x05000001: // context buffer
+		case 0x05000001: /* context buffer */
 			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
 						*size * 2, 0);
 			if (r)
 				goto out;
 			break;
 
-		case 0x05000004: // video bitstream buffer
+		case 0x05000004: /* video bitstream buffer */
 			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
 			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
 						tmp, bs_idx);
@@ -715,7 +708,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 				goto out;
 			break;
 
-		case 0x05000005: // feedback buffer
+		case 0x05000005: /* feedback buffer */
 			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
 						4096, fb_idx);
 			if (r)
@@ -737,21 +730,24 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 		idx += len / 4;
 	}
 
-	if (allocated && !created) {
+	if (allocated & ~created) {
 		DRM_ERROR("New session without create command!\n");
 		r = -ENOENT;
 	}
 
 out:
-	if ((!r && destroyed) || (r && allocated)) {
-		/*
-		 * IB contains a destroy msg or we have allocated an
-		 * handle and got an error, anyway free the handle
-		 */
-		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
-			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
+	if (!r) {
+		/* No error, free all destroyed handle slots */
+		tmp = destroyed;
+	} else {
+		/* Error during parsing, free all allocated handle slots */
+		tmp = allocated;
 	}
 
+	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+		if (tmp & (1 << i))
+			atomic_set(&p->adev->vce.handles[i], 0);
+
 	return r;
 }
...
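Switching allocated/created/destroyed from booleans to uint32_t bitmasks lets the parser track each session slot independently, so a create and a destroy for different handles can coexist in one IB and cleanup releases exactly the affected slots. A standalone sketch of the bookkeeping (slot count and the scenario are illustrative):

#include <stdio.h>
#include <stdint.h>

#define MAX_HANDLES 16

int main(void)
{
    uint32_t allocated = 0, created = 0, destroyed = 0;

    /* session 3 is created, session 5 is destroyed in the same stream */
    allocated |= 1u << 3;
    created   |= 1u << 3;
    destroyed |= 1u << 5;

    /* a slot that was allocated but never saw a create command is an error */
    if (allocated & ~created)
        printf("new session without create command\n");

    /* on success, only the destroyed slots are released */
    for (int i = 0; i < MAX_HANDLES; ++i)
        if (destroyed & (1u << i))
            printf("releasing handle slot %d\n", i);
    return 0;
}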
@@ -195,6 +195,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
+	uint64_t fence_context = adev->fence_context + ring->idx;
 	struct fence *updates = sync->last_vm_update;
 	struct amdgpu_vm_id *id, *idle;
 	struct fence **fences;
@@ -254,7 +255,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	i = ring->idx;
 	do {
 		struct fence *flushed;
-		bool same_ring = ring->idx == i;
 
 		id = vm->ids[i++];
 		if (i == AMDGPU_MAX_RINGS)
@@ -272,8 +272,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (job->vm_pd_addr != id->pd_gpu_addr)
 			continue;
 
-		if (!same_ring &&
-		    (!id->last_flush || !fence_is_signaled(id->last_flush)))
+		if (!id->last_flush)
+			continue;
+
+		if (id->last_flush->context != fence_context &&
+		    !fence_is_signaled(id->last_flush))
 			continue;
 
 		flushed = id->flushed_updates;
@@ -294,7 +297,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		job->vm_id = id - adev->vm_manager.ids;
 		job->vm_needs_flush = false;
-		trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
+		trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 
 		mutex_unlock(&adev->vm_manager.lock);
 		return 0;
@@ -325,7 +328,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	vm->ids[ring->idx] = id;
 
 	job->vm_id = id - adev->vm_manager.ids;
-	trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
+	trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 
 error:
 	mutex_unlock(&adev->vm_manager.lock);
...
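The reuse check above now compares fence contexts instead of ring indices: an unsignaled last flush only blocks reuse when it belongs to a different timeline than the one this job will run on, since work on the same context is ordered anyway. A rough sketch of the predicate in plain C, with simplified types standing in for the kernel's fence:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct flush_fence {
    uint64_t context;   /* timeline the flush was emitted on */
    bool     signaled;
};

/* Can this VM ID be reused by a job running on fence_context? */
static bool id_reusable(const struct flush_fence *last_flush,
                        uint64_t fence_context)
{
    if (!last_flush)
        return false;                /* never flushed: cannot prove anything */
    if (last_flush->context == fence_context)
        return true;                 /* same timeline: ordering is implicit */
    return last_flush->signaled;     /* foreign timeline: must have finished */
}

int main(void)
{
    struct flush_fence f = { .context = 7, .signaled = false };
    printf("%d %d\n", id_reusable(&f, 7), id_reusable(&f, 8)); /* prints: 1 0 */
    return 0;
}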
@@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 		if (dig->backlight_level == 0)
 			amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
 				ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
...
@@ -86,12 +86,14 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
 	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
 };
 
+#if 0
 static const struct ci_pt_defaults defaults_bonaire_pro =
 {
 	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
 	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
 	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
 };
+#endif
 
 static const struct ci_pt_defaults defaults_saturn_xt =
 {
...
@@ -622,7 +622,6 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
 	struct fence *f = NULL;
-	unsigned i;
 	unsigned index;
 	int r;
 	u32 tmp = 0;
@@ -644,7 +643,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 		goto err0;
 	}
 
-	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+				SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 	ib.ptr[1] = lower_32_bits(gpu_addr);
 	ib.ptr[2] = upper_32_bits(gpu_addr);
 	ib.ptr[3] = 1;
@@ -659,23 +659,15 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto err1;
 	}
-	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = le32_to_cpu(adev->wb.wb[index]);
-		if (tmp == 0xDEADBEEF)
-			break;
-		DRM_UDELAY(1);
-	}
-	if (i < adev->usec_timeout) {
-		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ring->idx, i);
-		goto err1;
+	tmp = le32_to_cpu(adev->wb.wb[index]);
+	if (tmp == 0xDEADBEEF) {
+		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 		r = -EINVAL;
 	}
 
 err1:
-	fence_put(f);
 	amdgpu_ib_free(adev, &ib, NULL);
 	fence_put(f);
 err0:
...
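This hunk and the matching GFX and SDMA hunks below rely on the fence wait for completion ordering: once fence_wait() has returned successfully, the write performed by the IB is visible to the CPU, so the old microsecond polling loop could only ever succeed on its first read and a single read suffices. A minimal model of the pattern (plain C, illustrative stand-ins for the fence and the writeback slot):

#include <stdio.h>
#include <stdint.h>

/* stand-in for the fence wait: returns 0 once the GPU work is done */
static int fence_wait_done(void) { return 0; }

int main(void)
{
    volatile uint32_t writeback = 0xDEADBEEF; /* written by the "GPU" IB */

    if (fence_wait_done() == 0) {
        /* completion is already guaranteed: one read is enough */
        if (writeback == 0xDEADBEEF)
            printf("ib test succeeded\n");
        else
            printf("ib test failed (0x%08X)\n", (unsigned)writeback);
    }
    return 0;
}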
@@ -2112,7 +2112,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
 	struct fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
-	unsigned i;
 	int r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
@@ -2141,16 +2140,9 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto err2;
 	}
-	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(scratch);
-		if (tmp == 0xDEADBEEF)
-			break;
-		DRM_UDELAY(1);
-	}
-	if (i < adev->usec_timeout) {
-		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ring->idx, i);
-		goto err2;
+	tmp = RREG32(scratch);
+	if (tmp == 0xDEADBEEF) {
+		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
@@ -2158,7 +2150,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
 	}
 
 err2:
-	fence_put(f);
 	amdgpu_ib_free(adev, &ib, NULL);
 	fence_put(f);
 err1:
...
@@ -794,7 +794,6 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
 	struct fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
-	unsigned i;
 	int r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
@@ -823,23 +822,15 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto err2;
 	}
-	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(scratch);
-		if (tmp == 0xDEADBEEF)
-			break;
-		DRM_UDELAY(1);
-	}
-	if (i < adev->usec_timeout) {
-		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ring->idx, i);
-		goto err2;
+	tmp = RREG32(scratch);
+	if (tmp == 0xDEADBEEF) {
+		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
 		r = -EINVAL;
 	}
 err2:
-	fence_put(f);
 	amdgpu_ib_free(adev, &ib, NULL);
 	fence_put(f);
 err1:
@@ -1729,7 +1720,6 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		RREG32(sec_ded_counter_registers[i]);
 
 fail:
-	fence_put(f);
 	amdgpu_ib_free(adev, &ib, NULL);
 	fence_put(f);
...
@@ -24,7 +24,7 @@
 #include <linux/firmware.h>
 #include "drmP.h"
 #include "amdgpu.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
 
 MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
...
@@ -25,7 +25,7 @@
 #include "drmP.h"
 #include "amdgpu.h"
 #include "ppsmc.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
 #include "smu_ucode_xfer_vi.h"
 #include "amdgpu_ucode.h"
...
@@ -191,6 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
 	vid_mapping_table->num_entries = i;
 }
 
+#if 0
 static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
 {
 	{ 0, 4, 1 },
@@ -289,6 +290,7 @@ static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
 {
 	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
 };
+#endif
 
 static const struct kv_pt_config_reg didt_config_kv[] =
 {
...
@@ -90,7 +90,9 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_StartFanControl		((uint8_t)0x5B)
 #define PPSMC_StopFanControl		((uint8_t)0x5C)
 #define PPSMC_MSG_NoDisplay		((uint8_t)0x5D)
+#define PPSMC_NoDisplay			((uint8_t)0x5D)
 #define PPSMC_MSG_HasDisplay		((uint8_t)0x5E)
+#define PPSMC_HasDisplay		((uint8_t)0x5E)
 #define PPSMC_MSG_UVDPowerOFF		((uint8_t)0x60)
 #define PPSMC_MSG_UVDPowerON		((uint8_t)0x61)
 #define PPSMC_MSG_EnableULV		((uint8_t)0x62)
@@ -108,6 +110,7 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_DisableDTE		((uint8_t)0x88)
 #define PPSMC_MSG_ThrottleOVRDSCLKDS		((uint8_t)0x96)
 #define PPSMC_MSG_CancelThrottleOVRDSCLKDS	((uint8_t)0x97)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt	((uint16_t) 0x149)
 
 /* CI/KV/KB */
 #define PPSMC_MSG_UVDDPM_SetEnabledMask		((uint16_t) 0x12D)
@@ -161,6 +164,7 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_MASTER_DeepSleep_OFF		((uint16_t) 0x190)
 #define PPSMC_MSG_Remove_DC_Clamp		((uint16_t) 0x191)
 #define PPSMC_MSG_SetFanPwmMax			((uint16_t) 0x19A)
+#define PPSMC_MSG_SetFanRpmMax			((uint16_t) 0x205)
 
 #define PPSMC_MSG_ENABLE_THERMAL_DPM		((uint16_t) 0x19C)
 #define PPSMC_MSG_DISABLE_THERMAL_DPM		((uint16_t) 0x19D)
...
@@ -567,19 +567,21 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (!adev->firmware.smu_load) {
-		r = sdma_v2_4_load_microcode(adev);
-		if (r)
-			return r;
-	} else {
-		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-				AMDGPU_UCODE_ID_SDMA0);
-		if (r)
-			return -EINVAL;
-		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-				AMDGPU_UCODE_ID_SDMA1);
-		if (r)
-			return -EINVAL;
+	if (!adev->pp_enabled) {
+		if (!adev->firmware.smu_load) {
+			r = sdma_v2_4_load_microcode(adev);
+			if (r)
+				return r;
+		} else {
+			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+					AMDGPU_UCODE_ID_SDMA0);
+			if (r)
+				return -EINVAL;
+			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+					AMDGPU_UCODE_ID_SDMA1);
+			if (r)
+				return -EINVAL;
+		}
 	}
 
 	/* halt the engine before programing */
@@ -671,7 +673,6 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
 	struct fence *f = NULL;
-	unsigned i;
 	unsigned index;
 	int r;
 	u32 tmp = 0;
@@ -713,23 +714,15 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto err1;
 	}
-	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = le32_to_cpu(adev->wb.wb[index]);
-		if (tmp == 0xDEADBEEF)
-			break;
-		DRM_UDELAY(1);
-	}
-	if (i < adev->usec_timeout) {
-		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ring->idx, i);
-		goto err1;
+	tmp = le32_to_cpu(adev->wb.wb[index]);
+	if (tmp == 0xDEADBEEF) {
+		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 		r = -EINVAL;
 	}
 
 err1:
-	fence_put(f);
 	amdgpu_ib_free(adev, &ib, NULL);
 	fence_put(f);
 err0:
...
@@ -901,7 +901,6 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
 	struct fence *f = NULL;
-	unsigned i;
 	unsigned index;
 	int r;
 	u32 tmp = 0;
@@ -943,22 +942,14 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto err1;
 	}
-	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = le32_to_cpu(adev->wb.wb[index]);
-		if (tmp == 0xDEADBEEF)
-			break;
-		DRM_UDELAY(1);
-	}
-	if (i < adev->usec_timeout) {
-		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ring->idx, i);
-		goto err1;
+	tmp = le32_to_cpu(adev->wb.wb[index]);
+	if (tmp == 0xDEADBEEF) {
+		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 		r = -EINVAL;
 	}
 err1:
-	fence_put(f);
 	amdgpu_ib_free(adev, &ib, NULL);
 	fence_put(f);
 err0:
...
@@ -43,6 +43,7 @@
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
 
 #define VCE_V3_0_FW_SIZE	(384 * 1024)
 #define VCE_V3_0_STACK_SIZE	(64 * 1024)
@@ -51,6 +52,7 @@
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
+static int vce_v3_0_wait_for_idle(void *handle);
 
 /**
  * vce_v3_0_ring_get_rptr - get read pointer
@@ -205,6 +207,32 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 		vce_v3_0_override_vce_clock_gating(adev, false);
 }
 
+static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
+{
+	int i, j;
+	uint32_t status = 0;
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmVCE_STATUS);
+			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+				return 0;
+			mdelay(10);
+		}
+
+		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+		WREG32_P(mmVCE_SOFT_RESET,
+			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(10);
+		WREG32_P(mmVCE_SOFT_RESET, 0,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(10);
+	}
+
+	return -ETIMEDOUT;
+}
+
 /**
  * vce_v3_0_start - start VCE block
  *
@@ -215,11 +243,24 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 static int vce_v3_0_start(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
-	int idx, i, j, r;
+	int idx, r;
+
+	ring = &adev->vce.ring[0];
+	WREG32(mmVCE_RB_RPTR, ring->wptr);
+	WREG32(mmVCE_RB_WPTR, ring->wptr);
+	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+	ring = &adev->vce.ring[1];
+	WREG32(mmVCE_RB_RPTR2, ring->wptr);
+	WREG32(mmVCE_RB_WPTR2, ring->wptr);
+	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (idx = 0; idx < 2; ++idx) {
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
@@ -233,48 +274,24 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 		vce_v3_0_mc_resume(adev, idx);
 
-		/* set BUSY flag */
-		WREG32_P(mmVCE_STATUS, 1, ~1);
+		WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
+			~VCE_STATUS__JOB_BUSY_MASK);
+
 		if (adev->asic_type >= CHIP_STONEY)
 			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
 		else
 			WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
 				~VCE_VCPU_CNTL__CLK_EN_MASK);
 
-		WREG32_P(mmVCE_SOFT_RESET,
-			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-		mdelay(100);
-
 		WREG32_P(mmVCE_SOFT_RESET, 0,
 			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
 
-		for (i = 0; i < 10; ++i) {
-			uint32_t status;
-			for (j = 0; j < 100; ++j) {
-				status = RREG32(mmVCE_STATUS);
-				if (status & 2)
-					break;
-				mdelay(10);
-			}
-			r = 0;
-			if (status & 2)
-				break;
-
-			DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
-			WREG32_P(mmVCE_SOFT_RESET,
-				VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-			mdelay(10);
-			WREG32_P(mmVCE_SOFT_RESET, 0,
-				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-			mdelay(10);
-			r = -1;
-		}
+		mdelay(100);
+
+		r = vce_v3_0_firmware_loaded(adev);
 
 		/* clear BUSY flag */
-		WREG32_P(mmVCE_STATUS, 0, ~1);
+		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
 
 		/* Set Clock-Gating off */
 		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
@@ -290,19 +307,46 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
-	ring = &adev->vce.ring[0];
-	WREG32(mmVCE_RB_RPTR, ring->wptr);
-	WREG32(mmVCE_RB_WPTR, ring->wptr);
-	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
-
-	ring = &adev->vce.ring[1];
-	WREG32(mmVCE_RB_RPTR2, ring->wptr);
-	WREG32(mmVCE_RB_WPTR2, ring->wptr);
-	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+	return 0;
+}
+
+static int vce_v3_0_stop(struct amdgpu_device *adev)
+{
+	int idx;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		if (idx == 0)
+			WREG32_P(mmGRBM_GFX_INDEX, 0,
+				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+		else
+			WREG32_P(mmGRBM_GFX_INDEX,
+				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
+				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+
+		if (adev->asic_type >= CHIP_STONEY)
+			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
+		else
+			WREG32_P(mmVCE_VCPU_CNTL, 0,
+				~VCE_VCPU_CNTL__CLK_EN_MASK);
+		/* hold on ECPU */
+		WREG32_P(mmVCE_SOFT_RESET,
+			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+		/* clear BUSY flag */
+		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+
+		/* Set Clock-Gating off */
+		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
+			vce_v3_0_set_vce_sw_clock_gating(adev, false);
+	}
+
+	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
 }
@@ -441,7 +485,14 @@ static int vce_v3_0_hw_init(void *handle)
 
 static int vce_v3_0_hw_fini(void *handle)
 {
-	return 0;
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = vce_v3_0_wait_for_idle(handle);
+	if (r)
+		return r;
+
+	return vce_v3_0_stop(adev);
 }
 
 static int vce_v3_0_suspend(void *handle)
...
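Factoring the firmware-loaded poll into vce_v3_0_firmware_loaded() gives the retry/reset loop a single owner and a conventional error code (-ETIMEDOUT) instead of the old ad-hoc r = -1. Outside the driver, the shape of the helper is a bounded poll with a recovery action between attempts; a minimal sketch with stand-in functions (hw_ready() and reset_ecpu() are illustrative, not the driver's register accesses):

#include <stdio.h>

#define ATTEMPTS 10
#define POLLS_PER_ATTEMPT 100

static int hw_ready(void) { return 1; }   /* stand-in for the VCE_STATUS read */
static void reset_ecpu(void) { printf("resetting ECPU\n"); }

static int wait_firmware_loaded(void)
{
    for (int i = 0; i < ATTEMPTS; ++i) {
        for (int j = 0; j < POLLS_PER_ATTEMPT; ++j)
            if (hw_ready())
                return 0;             /* firmware reported loaded */
        reset_ecpu();                 /* recovery action between attempts */
    }
    return -1;                        /* would be -ETIMEDOUT in the kernel */
}

int main(void)
{
    printf("wait_firmware_loaded() = %d\n", wait_firmware_loaded());
    return 0;
}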
@@ -1249,15 +1249,7 @@ static int vi_common_early_init(void *handle)
 			AMD_CG_SUPPORT_HDP_LS |
 			AMD_CG_SUPPORT_SDMA_MGCG |
 			AMD_CG_SUPPORT_SDMA_LS;
-		/* rev0 hardware doesn't support PG */
 		adev->pg_flags = 0;
-		if (adev->rev_id != 0x00)
-			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
-				AMD_PG_SUPPORT_GFX_SMG |
-				AMD_PG_SUPPORT_GFX_DMG |
-				AMD_PG_SUPPORT_CP |
-				AMD_PG_SUPPORT_RLC_SMU_HS |
-				AMD_PG_SUPPORT_GFX_PIPELINE;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		break;
 	case CHIP_STONEY:
@@ -1276,12 +1268,6 @@ static int vi_common_early_init(void *handle)
 			AMD_CG_SUPPORT_HDP_LS |
 			AMD_CG_SUPPORT_SDMA_MGCG |
 			AMD_CG_SUPPORT_SDMA_LS;
-		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
-			AMD_PG_SUPPORT_GFX_SMG |
-			AMD_PG_SUPPORT_GFX_DMG |
-			AMD_PG_SUPPORT_GFX_PIPELINE |
-			AMD_PG_SUPPORT_CP |
-			AMD_PG_SUPPORT_RLC_SMU_HS;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		break;
 	default:
...
@@ -160,6 +160,10 @@ struct cgs_firmware_info {
 	uint16_t		feature_version;
 	uint32_t		image_size;
 	uint64_t		mc_addr;
+
+	/* only for smc firmware */
+	uint32_t		ucode_start_address;
+
 	void			*kptr;
 };
...
@@ -1828,7 +1828,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 {
 	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+	uint8_t i, stretch_amount, volt_offset = 0;
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1879,11 +1879,8 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 	data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
 	/* Populate CKS Lookup Table */
-	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-		stretch_amount2 = 0;
-	else if (stretch_amount == 3 || stretch_amount == 4)
-		stretch_amount2 = 1;
-	else {
+	if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
+	    stretch_amount != 4 && stretch_amount != 5) {
 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 				PHM_PlatformCaps_ClockStretcher);
 		PP_ASSERT_WITH_CODE(false,
...
@@ -281,6 +281,7 @@ struct pp_atom_ctrl__avfs_parameters {
 extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
 extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+extern int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t *voltage);
 extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
 extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo);
 extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo);
...
@@ -810,6 +810,19 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
 	return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
 }
 
+int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
+				uint32_t *vol_rep_time, uint32_t *bb_rep_time)
+{
+	const ATOM_PPLIB_POWERPLAYTABLE *powerplay_tab = get_powerplay_table(hwmgr);
+
+	PP_ASSERT_WITH_CODE(NULL != powerplay_tab,
+			    "Missing PowerPlay Table!", return -EINVAL);
+
+	*vol_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usVoltageTime);
+	*bb_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usBackbiasTime);
+
+	return 0;
+}
+
 int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
 				 unsigned long *num_of_entries)
...
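A caller of the new helper passes two out-parameters and checks the return code. A hypothetical usage fragment (the surrounding hwmgr setup is assumed and not part of this diff):

	uint32_t vol_rep_time, bb_rep_time;
	int ret;

	/* hwmgr is assumed to be an initialized struct pp_hwmgr * */
	ret = pp_tables_get_response_times(hwmgr, &vol_rep_time, &bb_rep_time);
	if (ret)
		return ret;	/* -EINVAL when the PowerPlay table is missing */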
@@ -32,16 +32,19 @@ struct pp_hw_power_state;
 extern const struct pp_table_func pptable_funcs;
 
 typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr,
						struct pp_hw_power_state *hw_ps,
						unsigned int index,
						const void *clock_info);
 
 int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
				 unsigned long *num_of_entries);
 
 int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
			unsigned long entry_index,
			struct pp_power_state *ps,
			pp_tables_hw_clock_info_callback func);
 
+int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
+				uint32_t *vol_rep_time, uint32_t *bb_rep_time);
+
 #endif
@@ -131,6 +131,12 @@ extern int smu_free_memory(void *device, void *handle);
 	smum_wait_on_indirect_register(smumgr,				\
 				mm##port##_INDEX, index, value, mask)
 
+#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask)	\
+	SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+
+#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval)	\
+	SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
+				    SMUM_FIELD_MASK(reg, field))
+
 #define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr,			\
 						index, value, mask)
@@ -158,6 +164,10 @@ extern int smu_free_memory(void *device, void *handle);
 		(SMUM_FIELD_MASK(reg, field) & ((field_val) <<		\
 			SMUM_FIELD_SHIFT(reg, field))))
 
+#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field)		\
+	SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg),	\
+		       reg, field)
+
 #define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr,		\
 				port, index, value, mask)		\
 	smum_wait_on_indirect_register(smumgr,				\
@@ -191,6 +201,13 @@ extern int smu_free_memory(void *device, void *handle);
 	SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg),	\
 			reg, field, fieldval))
 
+#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval)	\
+	cgs_write_ind_register(device, port, ix##reg,			\
+		SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+			       reg, field, fieldval))
+
 #define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
 	SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg,		\
 		(fieldval) << SMUM_FIELD_SHIFT(reg, field),		\
@@ -200,4 +217,16 @@ extern int smu_free_memory(void *device, void *handle);
 	SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg,	\
 		(fieldval) << SMUM_FIELD_SHIFT(reg, field),		\
 		SMUM_FIELD_MASK(reg, field))
+
+#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \
+	smum_wait_for_indirect_register_unequal(smumgr,			\
+		mm##port##_INDEX, index, value, mask)
+
+#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \
+	SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+
+#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \
+	SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
+					    SMUM_FIELD_MASK(reg, field))
+
 #endif
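The new wait macros compose in layers: the FIELD form shifts the desired value into place and builds the mask, the REGISTER form resolves the ix##reg indirect index, and the GIVEN_INDEX form issues the actual wait. A hand expansion of one call, assuming SMUM_FIELD_SHIFT and SMUM_FIELD_MASK token-paste reg__field__SHIFT and reg__field_MASK (register and field names here are illustrative):

/* SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 1)
 * expands roughly to:
 *
 *	smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX,
 *		ixRCU_UC_EVENTS,
 *		1 << RCU_UC_EVENTS__boot_seq_done__SHIFT,
 *		RCU_UC_EVENTS__boot_seq_done_MASK);
 */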
@@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 		if (dig->backlight_level == 0)
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
 		else {
...