Commit c19e329d authored by Ben Skeggs

drm/nouveau/pmu: switch to subdev printk macros

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 27cc60a1
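
Note: the conversion below is mechanical. Call sites stop passing the nvkm_pmu object to the old nv_warn()/nv_error()/nv_info()/nv_debug()/nv_trace() macros and instead hand a struct nvkm_subdev * to the nvkm_* equivalents, either through a local subdev variable or by passing &pmu->subdev directly. The following is a minimal stand-alone sketch of the call-site shape only; the struct layouts and the fprintf-based nvkm_warn() below are simplified stand-ins for illustration, not the real nouveau definitions.

/* Illustrative sketch of the new call-site shape; compiles on its own. */
#include <stdio.h>

struct nvkm_subdev { const char *name; };
struct nvkm_pmu    { struct nvkm_subdev subdev; };

/* Hypothetical stand-in for the real subdev printk macros. */
#define nvkm_warn(s, fmt, ...) \
	fprintf(stderr, "nouveau: %s: " fmt, (s)->name, ##__VA_ARGS__)

static void
example(struct nvkm_pmu *pmu)
{
	/* New style: resolve the subdev once, then log against it. */
	struct nvkm_subdev *subdev = &pmu->subdev;

	nvkm_warn(subdev, "intr %08x\n", 0x80u);
}

int main(void)
{
	struct nvkm_pmu pmu = { .subdev = { .name = "pmu" } };
	example(&pmu);
	return 0;
}
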
@@ -92,7 +92,8 @@ static void
 nvkm_pmu_recv(struct work_struct *work)
 {
 	struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
-	struct nvkm_device *device = pmu->subdev.device;
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 process, message, data0, data1;
 
 	/* nothing to do if GET == PUT */
@@ -132,12 +133,12 @@ nvkm_pmu_recv(struct work_struct *work)
 	/* right now there's no other expected responses from the engine,
 	 * so assume that any unexpected message is an error.
 	 */
-	nv_warn(pmu, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
-		(char)((process & 0x000000ff) >>  0),
-		(char)((process & 0x0000ff00) >>  8),
-		(char)((process & 0x00ff0000) >> 16),
-		(char)((process & 0xff000000) >> 24),
-		process, message, data0, data1);
+	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
+		  (char)((process & 0x000000ff) >>  0),
+		  (char)((process & 0x0000ff00) >>  8),
+		  (char)((process & 0x00ff0000) >> 16),
+		  (char)((process & 0xff000000) >> 24),
+		  process, message, data0, data1);
 }
 
 static void
@@ -151,8 +152,9 @@ nvkm_pmu_intr(struct nvkm_subdev *subdev)
 	if (intr & 0x00000020) {
 		u32 stat = nvkm_rd32(device, 0x10a16c);
 		if (stat & 0x80000000) {
-			nv_error(pmu, "UAS fault at 0x%06x addr 0x%08x\n",
-				 stat & 0x00ffffff, nvkm_rd32(device, 0x10a168));
+			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
+				   stat & 0x00ffffff,
+				   nvkm_rd32(device, 0x10a168));
 			nvkm_wr32(device, 0x10a16c, 0x00000000);
 			intr &= ~0x00000020;
 		}
@@ -165,14 +167,15 @@ nvkm_pmu_intr(struct nvkm_subdev *subdev)
 	}
 
 	if (intr & 0x00000080) {
-		nv_info(pmu, "wr32 0x%06x 0x%08x\n", nvkm_rd32(device, 0x10a7a0),
-			nvkm_rd32(device, 0x10a7a4));
+		nvkm_info(subdev, "wr32 %06x %08x\n",
+			  nvkm_rd32(device, 0x10a7a0),
+			  nvkm_rd32(device, 0x10a7a4));
 		nvkm_wr32(device, 0x10a004, 0x00000080);
 		intr &= ~0x00000080;
 	}
 
 	if (intr) {
-		nv_error(pmu, "intr 0x%08x\n", intr);
+		nvkm_error(subdev, "intr %08x\n", intr);
 		nvkm_wr32(device, 0x10a004, intr);
 	}
 }
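
In nvkm_pmu_recv() the %c%c%c%c specifiers decode the four bytes of the process identifier as ASCII, which is what makes an unexpected message recognisable in the log. Below is a stand-alone illustration of that unpacking; the process value is made up purely for the example (real values are read back from the PMU).

#include <stdio.h>

int main(void)
{
	/* Hypothetical process tag; bytes spell "PM16", least significant first. */
	unsigned int process = 0x36314d50;

	printf("%c%c%c%c %08x\n",
	       (char)((process & 0x000000ff) >>  0),
	       (char)((process & 0x0000ff00) >>  8),
	       (char)((process & 0x00ff0000) >> 16),
	       (char)((process & 0xff000000) >> 24),
	       process);
	return 0;
}
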
...
@@ -81,7 +81,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 	if (nv_device_match(device, 0x11fc, 0x17aa, 0x2211) /* Lenovo W541 */
 	 || nv_device_match(device, 0x11fc, 0x17aa, 0x221e) /* Lenovo W541 */
 	 || nvkm_boolopt(device->cfgopt, "War00C800_0", false)) {
-		nv_info(pmu, "hw bug workaround enabled\n");
+		nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
 		switch (device->chipset) {
 		case 0xe4:
 			magic(device, 0x04000000);
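
Note the difference from the hunks above: in this diff, gk104_pmu_pgob() emits a single message, so the conversion passes &pmu->subdev inline rather than introducing a subdev local, while functions with several messages hoist the pointer into a local first. A rough sketch of the two shapes, using the same kind of simplified stand-ins as earlier (the struct layouts and the printf-based macro are assumptions, not the real definitions):

#include <stdio.h>

struct nvkm_subdev { const char *name; };
struct nvkm_pmu    { struct nvkm_subdev subdev; };

/* Hypothetical stand-in for nvkm_info(). */
#define nvkm_info(s, fmt, ...) \
	printf("nouveau: %s: " fmt, (s)->name, ##__VA_ARGS__)

/* One message only: pass &pmu->subdev directly. */
static void one_message(struct nvkm_pmu *pmu)
{
	nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
}

/* Several messages: hoist the subdev pointer into a local first. */
static void several_messages(struct nvkm_pmu *pmu)
{
	struct nvkm_subdev *subdev = &pmu->subdev;

	nvkm_info(subdev, "first message\n");
	nvkm_info(subdev, "second message\n");
}

int main(void)
{
	struct nvkm_pmu pmu = { .subdev = { .name = "pmu" } };
	one_message(&pmu);
	several_messages(&pmu);
	return 0;
}
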
...
@@ -84,7 +84,8 @@ gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
 		level = min(clk->state_nr - 1, level);
 	}
 
-	nv_trace(pmu, "cur level = %d, new level = %d\n", cur_level, level);
+	nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
+		   cur_level, level);
 
 	*state = level;
 
@@ -119,8 +120,10 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
 		container_of(alarm, struct gk20a_pmu, alarm);
 	struct gk20a_pmu_dvfs_data *data = pmu->data;
 	struct gk20a_pmu_dvfs_dev_status status;
-	struct nvkm_clk *clk = nvkm_clk(pmu);
-	struct nvkm_volt *volt = nvkm_volt(pmu);
+	struct nvkm_subdev *subdev = &pmu->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_clk *clk = device->clk;
+	struct nvkm_volt *volt = device->volt;
 	u32 utilization = 0;
 	int state, ret;
 
@@ -133,7 +136,7 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
 	ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status);
 	if (ret) {
-		nv_warn(pmu, "failed to get device status\n");
+		nvkm_warn(subdev, "failed to get device status\n");
 		goto resched;
 	}
 
@@ -142,17 +145,17 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
 	data->avg_load = (data->p_smooth * data->avg_load) + utilization;
 	data->avg_load /= data->p_smooth + 1;
-	nv_trace(pmu, "utilization = %d %%, avg_load = %d %%\n",
-		 utilization, data->avg_load);
+	nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
+		   utilization, data->avg_load);
 
 	ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state);
 	if (ret) {
-		nv_warn(pmu, "failed to get current state\n");
+		nvkm_warn(subdev, "failed to get current state\n");
 		goto resched;
 	}
 
 	if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
-		nv_trace(pmu, "set new state to %d\n", state);
+		nvkm_trace(subdev, "set new state to %d\n", state);
 		gk20a_pmu_dvfs_target(pmu, &state);
 	}
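
For context on the gk20a hunks: gk20a_pmu_dvfs_work() smooths the sampled utilization with avg_load = (p_smooth * avg_load + utilization) / (p_smooth + 1) before picking a target state, i.e. a simple recursive average where a larger p_smooth reacts more slowly. A quick stand-alone check of how a load spike decays under that update; the p_smooth value and the sample sequence here are arbitrary choices for the example, not driver values.

#include <stdio.h>

int main(void)
{
	/* Arbitrary smoothing factor and utilization samples (in percent). */
	unsigned int p_smooth = 7, avg_load = 0;
	unsigned int samples[] = { 100, 100, 0, 0, 0, 0 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		/* Same update as the driver: weighted old average plus new sample. */
		avg_load = (p_smooth * avg_load) + samples[i];
		avg_load /= p_smooth + 1;
		printf("sample %3u%% -> avg_load %3u%%\n", samples[i], avg_load);
	}
	return 0;
}
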
...
@@ -72,7 +72,8 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
 {
 	struct nvkm_memx *memx = *pmemx;
 	struct nvkm_pmu *pmu = memx->pmu;
-	struct nvkm_device *device = pmu->subdev.device;
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 finish, reply[2];
 
 	/* flush the cache... */
@@ -88,8 +89,8 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
 			      memx->base, finish);
 	}
 
-	nv_debug(memx->pmu, "Exec took %uns, PMU_IN %08x\n",
-		 reply[0], reply[1]);
+	nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
+		   reply[0], reply[1]);
 	kfree(memx);
 	return 0;
 }
@@ -97,7 +98,7 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
 void
 nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
 {
-	nv_debug(memx->pmu, "R[%06x] = 0x%08x\n", addr, data);
+	nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
 	memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
 }
@@ -105,8 +106,8 @@ void
 nvkm_memx_wait(struct nvkm_memx *memx,
 	       u32 addr, u32 mask, u32 data, u32 nsec)
 {
-	nv_debug(memx->pmu, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
-		 addr, mask, data, nsec);
+	nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d us\n",
+		   addr, mask, data, nsec);
 	memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
 	memx_out(memx); /* fuc can't handle multiple */
 }
@@ -114,7 +115,7 @@ nvkm_memx_wait(struct nvkm_memx *memx,
 void
 nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
 {
-	nv_debug(memx->pmu, " DELAY = %d ns\n", nsec);
+	nvkm_debug(&memx->pmu->subdev, " DELAY = %d ns\n", nsec);
 	memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
 	memx_out(memx); /* fuc can't handle multiple */
 }
@@ -122,7 +123,8 @@ nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
 void
 nvkm_memx_wait_vblank(struct nvkm_memx *memx)
 {
-	struct nvkm_device *device = memx->pmu->subdev.device;
+	struct nvkm_subdev *subdev = &memx->pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 heads, x, y, px = 0;
 	int i, head_sync;
@@ -143,11 +145,11 @@ nvkm_memx_wait_vblank(struct nvkm_memx *memx)
 	}
 
 	if (px == 0) {
-		nv_debug(memx->pmu, "WAIT VBLANK !NO ACTIVE HEAD\n");
+		nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
 		return;
 	}
 
-	nv_debug(memx->pmu, "WAIT VBLANK HEAD%d\n", head_sync);
+	nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
 	memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
 	memx_out(memx); /* fuc can't handle multiple */
 }
@@ -155,7 +157,7 @@ nvkm_memx_wait_vblank(struct nvkm_memx *memx)
 void
 nvkm_memx_train(struct nvkm_memx *memx)
 {
-	nv_debug(memx->pmu, " MEM TRAIN\n");
+	nvkm_debug(&memx->pmu->subdev, " MEM TRAIN\n");
 	memx_cmd(memx, MEMX_TRAIN, 0, NULL);
 }
@@ -188,14 +190,14 @@ nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
 void
 nvkm_memx_block(struct nvkm_memx *memx)
 {
-	nv_debug(memx->pmu, " HOST BLOCKED\n");
+	nvkm_debug(&memx->pmu->subdev, " HOST BLOCKED\n");
 	memx_cmd(memx, MEMX_ENTER, 0, NULL);
 }
 
 void
 nvkm_memx_unblock(struct nvkm_memx *memx)
 {
-	nv_debug(memx->pmu, " HOST UNBLOCKED\n");
+	nvkm_debug(&memx->pmu->subdev, " HOST UNBLOCKED\n");
 	memx_cmd(memx, MEMX_LEAVE, 0, NULL);
 }
 #endif
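
One idiom worth noting in the memx helpers touched here: each one logs and then queues its payload through memx_cmd() as a C99 compound literal such as (u32[]){ addr, mask, data, nsec }, which builds a temporary array in place at the call site. Below is a stand-alone illustration of that idiom; the consume() helper and the register values are made up for the example and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Hypothetical consumer standing in for memx_cmd(): it just prints the
 * payload words it was handed. */
static void consume(const char *what, int argc, u32 argv[])
{
	printf("%s:", what);
	for (int i = 0; i < argc; i++)
		printf(" %08x", argv[i]);
	printf("\n");
}

int main(void)
{
	/* Arbitrary example values. */
	u32 addr = 0x001000, mask = 0xffffffff, data = 0, nsec = 1000;

	/* Compound literals create the temporary payload arrays in place,
	 * mirroring the (u32[]){ ... } arguments passed to memx_cmd(). */
	consume("WR32", 2, (u32[]){ addr, data });
	consume("WAIT", 4, (u32[]){ addr, mask, data, nsec });
	return 0;
}
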