Commit 7b1f2485 authored by Christian König, committed by Dave Airlie

drm/radeon: make all functions work with multiple rings.

Give all asic and radeon_ring_* functions a radeon_cp
parameter, so they know which ring to work with.
Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 15d3332f
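
In essence, the patch threads an explicit struct radeon_cp * through the ring and asic entry points instead of letting every helper reach for the single rdev->cp. A minimal sketch of the calling-convention change, using simplified signatures taken from the diff below (the call site is illustrative only, not a specific hunk):

/* Before: helpers implicitly operated on the one ring in rdev->cp. */
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
void radeon_ring_unlock_commit(struct radeon_device *rdev);

/* After: the caller names the ring it wants to touch. */
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
void radeon_ring_write(struct radeon_cp *cp, uint32_t v);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_cp *cp);

/* Typical call site after the change: */
struct radeon_cp *cp = &rdev->cp; /* or &rdev->cp1 / &rdev->cp2 on Cayman */
if (!radeon_ring_lock(rdev, cp, 2)) {
	radeon_ring_write(cp, 0x80000000); /* PACKET2 NOP */
	radeon_ring_write(cp, 0x80000000);
	radeon_ring_unlock_commit(rdev, cp);
}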
@@ -1311,18 +1311,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_cp *cp = &rdev->cp;
/* set to DX10/11 mode */
radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(rdev, 1);
radeon_ring_write(cp, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(cp, 1);
/* FIXME: implement */
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev,
radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(cp,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(rdev, ib->length_dw);
radeon_ring_write(cp, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(cp, ib->length_dw);
}
@@ -1360,71 +1362,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
static int evergreen_cp_start(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp;
int r, i;
uint32_t cp_me;
r = radeon_ring_lock(rdev, 7);
r = radeon_ring_lock(rdev, cp, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
radeon_ring_write(cp, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(cp, 0x1);
radeon_ring_write(cp, 0x0);
radeon_ring_write(cp, rdev->config.evergreen.max_hw_contexts - 1);
radeon_ring_write(cp, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, 0);
radeon_ring_unlock_commit(rdev, cp);
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
r = radeon_ring_lock(rdev, evergreen_default_size + 19);
r = radeon_ring_lock(rdev, cp, evergreen_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* setup clear context state */
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(cp, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
for (i = 0; i < evergreen_default_size; i++)
radeon_ring_write(rdev, evergreen_default_state[i]);
radeon_ring_write(cp, evergreen_default_state[i]);
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(cp, PACKET3_PREAMBLE_END_CLEAR_STATE);
/* set clear context state */
radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(cp, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(cp, 0);
/* SQ_VTX_BASE_VTX_LOC */
radeon_ring_write(rdev, 0xc0026f00);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(cp, 0xc0026f00);
radeon_ring_write(cp, 0x00000000);
radeon_ring_write(cp, 0x00000000);
radeon_ring_write(cp, 0x00000000);
/* Clear consts */
radeon_ring_write(rdev, 0xc0036f00);
radeon_ring_write(rdev, 0x00000bc4);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(cp, 0xc0036f00);
radeon_ring_write(cp, 0x00000bc4);
radeon_ring_write(cp, 0xffffffff);
radeon_ring_write(cp, 0xffffffff);
radeon_ring_write(cp, 0xffffffff);
radeon_ring_write(rdev, 0xc0026900);
radeon_ring_write(rdev, 0x00000316);
radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(rdev, 0x00000010); /* */
radeon_ring_write(cp, 0xc0026900);
radeon_ring_write(cp, 0x00000316);
radeon_ring_write(cp, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(cp, 0x00000010); /* */
radeon_ring_unlock_commit(rdev);
radeon_ring_unlock_commit(rdev, cp);
return 0;
}
int evergreen_cp_resume(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp;
u32 tmp;
u32 rb_bufsz;
int r;
@@ -1442,7 +1446,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
rb_bufsz = drm_order(cp->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1456,8 +1460,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
rdev->cp.wptr = 0;
WREG32(CP_RB_WPTR, rdev->cp.wptr);
cp->wptr = 0;
WREG32(CP_RB_WPTR, cp->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
@@ -1475,16 +1479,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
WREG32(CP_RB_BASE, cp->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
rdev->cp.rptr = RREG32(CP_RB_RPTR);
cp->rptr = RREG32(CP_RB_RPTR);
evergreen_cp_start(rdev);
rdev->cp.ready = true;
r = radeon_ring_test(rdev);
cp->ready = true;
r = radeon_ring_test(rdev, cp);
if (r) {
rdev->cp.ready = false;
cp->ready = false;
return r;
}
return 0;
@@ -2353,7 +2357,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
{
u32 srbm_status;
u32 grbm_status;
@@ -2366,19 +2370,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
r100_gpu_lockup_update(lockup, &rdev->cp);
r100_gpu_lockup_update(lockup, cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
r = radeon_ring_lock(rdev, cp, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
radeon_ring_write(cp, 0x80000000);
radeon_ring_write(cp, 0x80000000);
radeon_ring_unlock_commit(rdev, cp);
}
rdev->cp.rptr = RREG32(CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
cp->rptr = RREG32(CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, cp);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -3052,6 +3056,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
static int evergreen_startup(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp;
int r;
/* enable pcie gen2 link */
@@ -3115,7 +3120,7 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
r = radeon_ring_init(rdev, cp, cp->ring_size);
if (r)
return r;
r = evergreen_cp_load_microcode(rdev);
@@ -3150,7 +3155,7 @@ int evergreen_resume(struct radeon_device *rdev)
return r;
}
r = r600_ib_test(rdev);
r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
@@ -3162,9 +3167,11 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp;
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
cp->ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
@@ -3244,7 +3251,7 @@ int evergreen_init(struct radeon_device *rdev)
return r;
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3270,7 +3277,7 @@ int evergreen_init(struct radeon_device *rdev)
DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
......
@@ -1049,63 +1049,64 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
static int cayman_cp_start(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp;
int r, i;
r = radeon_ring_lock(rdev, 7);
r = radeon_ring_lock(rdev, cp, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
radeon_ring_write(cp, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(cp, 0x1);
radeon_ring_write(cp, 0x0);
radeon_ring_write(cp, rdev->config.cayman.max_hw_contexts - 1);
radeon_ring_write(cp, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, 0);
radeon_ring_unlock_commit(rdev, cp);
cayman_cp_enable(rdev, true);
r = radeon_ring_lock(rdev, cayman_default_size + 19);
r = radeon_ring_lock(rdev, cp, cayman_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* setup clear context state */
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(cp, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
for (i = 0; i < cayman_default_size; i++)
radeon_ring_write(rdev, cayman_default_state[i]);
radeon_ring_write(cp, cayman_default_state[i]);
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(cp, PACKET3_PREAMBLE_END_CLEAR_STATE);
/* set clear context state */
radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(cp, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(cp, 0);
/* SQ_VTX_BASE_VTX_LOC */
radeon_ring_write(rdev, 0xc0026f00);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(cp, 0xc0026f00);
radeon_ring_write(cp, 0x00000000);
radeon_ring_write(cp, 0x00000000);
radeon_ring_write(cp, 0x00000000);
/* Clear consts */
radeon_ring_write(rdev, 0xc0036f00);
radeon_ring_write(rdev, 0x00000bc4);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(cp, 0xc0036f00);
radeon_ring_write(cp, 0x00000bc4);
radeon_ring_write(cp, 0xffffffff);
radeon_ring_write(cp, 0xffffffff);
radeon_ring_write(cp, 0xffffffff);
radeon_ring_write(rdev, 0xc0026900);
radeon_ring_write(rdev, 0x00000316);
radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(rdev, 0x00000010); /* */
radeon_ring_write(cp, 0xc0026900);
radeon_ring_write(cp, 0x00000316);
radeon_ring_write(cp, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(cp, 0x00000010); /* */
radeon_ring_unlock_commit(rdev);
radeon_ring_unlock_commit(rdev, cp);
/* XXX init other rings */
@@ -1115,11 +1116,12 @@ static int cayman_cp_start(struct radeon_device *rdev)
static void cayman_cp_fini(struct radeon_device *rdev)
{
cayman_cp_enable(rdev, false);
radeon_ring_fini(rdev);
radeon_ring_fini(rdev, &rdev->cp);
}
int cayman_cp_resume(struct radeon_device *rdev)
{
struct radeon_cp *cp;
u32 tmp;
u32 rb_bufsz;
int r;
@@ -1145,7 +1147,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
cp = &rdev->cp;
rb_bufsz = drm_order(cp->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1154,8 +1157,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
rdev->cp.wptr = 0;
WREG32(CP_RB0_WPTR, rdev->cp.wptr);
cp->wptr = 0;
WREG32(CP_RB0_WPTR, cp->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1172,13 +1175,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB0_CNTL, tmp);
WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
WREG32(CP_RB0_BASE, cp->gpu_addr >> 8);
rdev->cp.rptr = RREG32(CP_RB0_RPTR);
cp->rptr = RREG32(CP_RB0_RPTR);
/* ring1 - compute only */
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
cp = &rdev->cp1;
rb_bufsz = drm_order(cp->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1187,8 +1191,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
rdev->cp1.wptr = 0;
WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
cp->wptr = 0;
WREG32(CP_RB1_WPTR, cp->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1197,13 +1201,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB1_CNTL, tmp);
WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
WREG32(CP_RB1_BASE, cp->gpu_addr >> 8);
rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
cp->rptr = RREG32(CP_RB1_RPTR);
/* ring2 - compute only */
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
cp = &rdev->cp2;
rb_bufsz = drm_order(cp->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1212,8 +1217,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
rdev->cp2.wptr = 0;
WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
cp->wptr = 0;
WREG32(CP_RB2_WPTR, cp->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1222,9 +1227,9 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB2_CNTL, tmp);
WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
WREG32(CP_RB2_BASE, cp->gpu_addr >> 8);
rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
cp->rptr = RREG32(CP_RB2_RPTR);
/* start the rings */
cayman_cp_start(rdev);
@@ -1232,7 +1237,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
rdev->cp1.ready = true;
rdev->cp2.ready = true;
/* this only tests cp0 */
r = radeon_ring_test(rdev);
r = radeon_ring_test(rdev, &rdev->cp);
if (r) {
rdev->cp.ready = false;
rdev->cp1.ready = false;
@@ -1243,7 +1248,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
return 0;
}
bool cayman_gpu_is_lockup(struct radeon_device *rdev)
bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
{
u32 srbm_status;
u32 grbm_status;
@@ -1256,20 +1261,20 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
r100_gpu_lockup_update(lockup, &rdev->cp);
r100_gpu_lockup_update(lockup, cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
r = radeon_ring_lock(rdev, cp, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
radeon_ring_write(cp, 0x80000000);
radeon_ring_write(cp, 0x80000000);
radeon_ring_unlock_commit(rdev, cp);
}
/* XXX deal with CP0,1,2 */
rdev->cp.rptr = RREG32(CP_RB0_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
cp->rptr = RREG32(CP_RB0_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, cp);
}
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
@@ -1338,6 +1343,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
static int cayman_startup(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp;
int r;
/* enable pcie gen2 link */
@@ -1387,7 +1393,7 @@ static int cayman_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
r = radeon_ring_init(rdev, cp, cp->ring_size);
if (r)
return r;
r = cayman_cp_load_microcode(rdev);
@@ -1417,7 +1423,7 @@ int cayman_resume(struct radeon_device *rdev)
return r;
}
r = r600_ib_test(rdev);
r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
@@ -1448,6 +1454,7 @@ int cayman_suspend(struct radeon_device *rdev)
*/
int cayman_init(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp;
int r;
/* This doesn't do much */
@@ -1500,8 +1507,8 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
cp->ring_obj = NULL;
r600_ring_init(rdev, cp, 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1527,7 +1534,7 @@ int cayman_init(struct radeon_device *rdev)
DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
......
@@ -87,6 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
struct radeon_cp *cp = &rdev->cp;
uint32_t size;
uint32_t cur_size;
int i, num_loops;
@@ -95,33 +96,33 @@ int r200_copy_dma(struct radeon_device *rdev,
/* radeon pitch is /64 */
size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
r = radeon_ring_lock(rdev, num_loops * 4 + 64);
r = radeon_ring_lock(rdev, cp, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, (1 << 16));
radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(cp, (1 << 16));
for (i = 0; i < num_loops; i++) {
cur_size = size;
if (cur_size > 0x1FFFFF) {
cur_size = 0x1FFFFF;
}
size -= cur_size;
radeon_ring_write(rdev, PACKET0(0x720, 2));
radeon_ring_write(rdev, src_offset);
radeon_ring_write(rdev, dst_offset);
radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
radeon_ring_write(cp, PACKET0(0x720, 2));
radeon_ring_write(cp, src_offset);
radeon_ring_write(cp, dst_offset);
radeon_ring_write(cp, cur_size | (1 << 31) | (1 << 30));
src_offset += cur_size;
dst_offset += cur_size;
}
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(cp, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
radeon_ring_unlock_commit(rdev);
radeon_ring_unlock_commit(rdev, cp);
return r;
}
......
@@ -175,37 +175,40 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_cp *cp = &rdev->cp;
/* Whoever calls radeon_fence_emit should call ring_lock and ask
* for enough space (today callers are ib schedule and buffer move) */
/* Write SC register so SC & US assert idle */
radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(cp, PACKET0(R300_RE_SCISSORS_TL, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(R300_RE_SCISSORS_BR, 0));
radeon_ring_write(cp, 0);
/* Flush 3D cache */
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_ZC_FLUSH);
radeon_ring_write(cp, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_RB3D_DC_FLUSH);
radeon_ring_write(cp, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_ZC_FLUSH);
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(cp, (RADEON_WAIT_3D_IDLECLEAN |
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE));
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
radeon_ring_write(cp, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(cp, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
radeon_ring_write(cp, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(cp, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
radeon_ring_write(cp, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(cp, fence->seq);
radeon_ring_write(cp, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(cp, RADEON_SW_INT_FIRE);
}
void r300_ring_start(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp;
unsigned gb_tile_config;
int r;
@@ -227,44 +230,44 @@ void r300_ring_start(struct radeon_device *rdev)
break;
}
r = radeon_ring_lock(rdev, 64);
r = radeon_ring_lock(rdev, cp, 64);
if (r) {
return;
}
radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
radeon_ring_write(rdev,
radeon_ring_write(cp, PACKET0(RADEON_ISYNC_CNTL, 0));
radeon_ring_write(cp,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
radeon_ring_write(rdev, gb_tile_config);
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev,
radeon_ring_write(cp, PACKET0(R300_GB_TILE_CONFIG, 0));
radeon_ring_write(cp, gb_tile_config);
radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(cp,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev,
radeon_ring_write(cp, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(cp, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(cp, PACKET0(R300_GB_SELECT, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(R300_GB_ENABLE, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(cp, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(cp,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
radeon_ring_write(rdev,
radeon_ring_write(cp, PACKET0(R300_GB_AA_CONFIG, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(cp, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(cp, PACKET0(R300_GB_MSPOS0, 0));
radeon_ring_write(cp,
((6 << R300_MS_X0_SHIFT) |
(6 << R300_MS_Y0_SHIFT) |
(6 << R300_MS_X1_SHIFT) |
@@ -273,8 +276,8 @@ void r300_ring_start(struct radeon_device *rdev)
(6 << R300_MS_Y2_SHIFT) |
(6 << R300_MSBD0_Y_SHIFT) |
(6 << R300_MSBD0_X_SHIFT)));
radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
radeon_ring_write(rdev,
radeon_ring_write(cp, PACKET0(R300_GB_MSPOS1, 0));
radeon_ring_write(cp,
((6 << R300_MS_X3_SHIFT) |
(6 << R300_MS_Y3_SHIFT) |
(6 << R300_MS_X4_SHIFT) |
@@ -282,16 +285,16 @@ void r300_ring_start(struct radeon_device *rdev)
(6 << R300_MS_X5_SHIFT) |
(6 << R300_MS_Y5_SHIFT) |
(6 << R300_MSBD1_SHIFT)));
radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
radeon_ring_write(rdev,
radeon_ring_write(cp, PACKET0(R300_GA_ENHANCE, 0));
radeon_ring_write(cp, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
radeon_ring_write(cp, PACKET0(R300_GA_POLY_MODE, 0));
radeon_ring_write(cp,
R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
radeon_ring_write(rdev,
radeon_ring_write(cp, PACKET0(R300_GA_ROUND_MODE, 0));
radeon_ring_write(cp,
R300_GEOMETRY_ROUND_NEAREST |
R300_COLOR_ROUND_NEAREST);
radeon_ring_unlock_commit(rdev);
radeon_ring_unlock_commit(rdev, cp);
}
void r300_errata(struct radeon_device *rdev)
@@ -375,26 +378,26 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
bool r300_gpu_is_lockup(struct radeon_device *rdev)
bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
{
u32 rbbm_status;
int r;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
r100_gpu_lockup_update(&rdev->config.r300.lockup, cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
r = radeon_ring_lock(rdev, cp, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
radeon_ring_write(cp, 0x80000000);
radeon_ring_write(cp, 0x80000000);
radeon_ring_unlock_commit(rdev, cp);
}
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
cp->rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, cp);
}
int r300_asic_reset(struct radeon_device *rdev)
......
@@ -199,6 +199,8 @@ static void r420_clock_resume(struct radeon_device *rdev)
static void r420_cp_errata_init(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp;
/* RV410 and R420 can lock up if CP DMA to host memory happens
* while the 2D engine is busy.
*
@@ -206,22 +208,24 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
* of the CP init, apparently.
*/
radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
radeon_ring_lock(rdev, 8);
radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
radeon_ring_write(rdev, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev);
radeon_ring_lock(rdev, cp, 8);
radeon_ring_write(cp, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(cp, rdev->config.r300.resync_scratch);
radeon_ring_write(cp, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, cp);
}
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp;
/* Catch the RESYNC we dispatched all the way back,
* at the very beginning of the CP init.
*/
radeon_ring_lock(rdev, 8);
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
radeon_ring_unlock_commit(rdev);
radeon_ring_lock(rdev, cp, 8);
radeon_ring_write(cp, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_RB3D_DC_FINISH);
radeon_ring_unlock_commit(rdev, cp);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
......
@@ -230,6 +230,8 @@ void radeon_fence_unref(struct radeon_fence **fence);
/*
* Semaphores.
*/
struct radeon_cp;
struct radeon_semaphore_driver {
rwlock_t lock;
struct list_head free;
@@ -585,7 +587,7 @@ struct r600_blit {
void r600_blit_suspend(struct radeon_device *rdev);
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -593,15 +595,15 @@ void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
void radeon_ring_free_size(struct radeon_device *rdev);
int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev);
void radeon_ring_unlock_commit(struct radeon_device *rdev);
void radeon_ring_unlock_undo(struct radeon_device *rdev);
int radeon_ring_test(struct radeon_device *rdev);
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
void radeon_ring_fini(struct radeon_device *rdev);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_cp *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_cp *cp);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_cp *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_cp *cp);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_cp *cp);
/*
@@ -930,24 +932,25 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
bool (*gpu_is_lockup)(struct radeon_device *rdev);
bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_cp *cp);
int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
void (*cp_fini)(struct radeon_device *rdev);
void (*cp_disable)(struct radeon_device *rdev);
void (*cp_commit)(struct radeon_device *rdev);
void (*cp_commit)(struct radeon_device *rdev, struct radeon_cp *cp);
void (*ring_start)(struct radeon_device *rdev);
int (*ring_test)(struct radeon_device *rdev);
int (*ring_test)(struct radeon_device *rdev, struct radeon_cp *cp);
void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
int (*irq_set)(struct radeon_device *rdev);
int (*irq_process)(struct radeon_device *rdev);
u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
void (*semaphore_ring_emit)(struct radeon_device *rdev,
struct radeon_cp *cp,
struct radeon_semaphore *semaphore,
unsigned ring, bool emit_wait);
bool emit_wait);
int (*cs_parse)(struct radeon_cs_parser *p);
int (*copy_blit)(struct radeon_device *rdev,
uint64_t src_offset,
@@ -1279,7 +1282,6 @@ struct radeon_device {
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
struct radeon_semaphore_driver semaphore_drv;
struct radeon_cp cp;
/* cayman compute rings */
struct radeon_cp cp1;
struct radeon_cp cp2;
struct radeon_ib_pool ib_pool;
@@ -1463,18 +1465,17 @@ void radeon_atombios_fini(struct radeon_device *rdev);
/*
* RING helpers.
*/
#if DRM_DEBUG_CODE == 0
static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
static inline void radeon_ring_write(struct radeon_cp *cp, uint32_t v)
{
rdev->cp.ring[rdev->cp.wptr++] = v;
rdev->cp.wptr &= rdev->cp.ptr_mask;
rdev->cp.count_dw--;
rdev->cp.ring_free_dw--;
cp->ring[cp->wptr++] = v;
cp->wptr &= cp->ptr_mask;
cp->count_dw--;
cp->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
void radeon_ring_write(struct radeon_cp *cp, uint32_t v);
#endif
/*
@@ -1486,19 +1487,19 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
#define radeon_cp_commit(rdev, cp) (rdev)->asic->cp_commit((rdev), (cp))
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
#define radeon_ring_test(rdev, cp) (rdev)->asic->ring_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, semaphore, ring, emit_wait) (rdev)->asic->semaphore_ring_emit((rdev), (semaphore), (ring), (emit_wait))
#define radeon_semaphore_ring_emit(rdev, cp, semaphore, emit_wait) (rdev)->asic->semaphore_ring_emit((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
......
@@ -58,20 +58,21 @@ void r100_fini(struct radeon_device *rdev);
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
bool r100_gpu_is_lockup(struct radeon_device *rdev);
bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
void r100_cp_commit(struct radeon_device *rdev);
void r100_cp_commit(struct radeon_device *rdev, struct radeon_cp *cp);
void r100_ring_start(struct radeon_device *rdev);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_cp *cp,
struct radeon_semaphore *semaphore,
unsigned ring, bool emit_wait);
bool emit_wait);
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -86,7 +87,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp);
void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -157,7 +158,7 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp);
extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -296,7 +297,7 @@ int r600_resume(struct radeon_device *rdev);
void r600_vga_set_state(struct radeon_device *rdev, bool state);
int r600_wb_init(struct radeon_device *rdev);
void r600_wb_fini(struct radeon_device *rdev);
void r600_cp_commit(struct radeon_device *rdev);
void r600_cp_commit(struct radeon_device *rdev, struct radeon_cp *cp);
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
@@ -304,17 +305,18 @@ int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_cp *cp,
struct radeon_semaphore *semaphore,
unsigned ring, bool emit_wait);
bool r600_gpu_is_lockup(struct radeon_device *rdev);
bool emit_wait);
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
int r600_ib_test(struct radeon_device *rdev);
int r600_ib_test(struct radeon_device *rdev, int ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence *fence);
@@ -334,7 +336,7 @@ extern int r600_get_pcie_lanes(struct radeon_device *rdev);
bool r600_card_posted(struct radeon_device *rdev);
void r600_cp_stop(struct radeon_device *rdev);
int r600_cp_start(struct radeon_device *rdev);
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
void r600_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size);
int r600_cp_resume(struct radeon_device *rdev);
void r600_cp_fini(struct radeon_device *rdev);
int r600_count_pipe_bits(uint32_t val);
@@ -403,7 +405,7 @@ int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -434,7 +436,7 @@ int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
bool cayman_gpu_is_lockup(struct radeon_device *rdev);
bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp);
int cayman_asic_reset(struct radeon_device *rdev);
#endif
@@ -246,7 +246,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_ib_get(rdev, &parser.ib);
r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &parser.ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
radeon_cs_parser_fini(&parser, r);
......
@@ -269,7 +269,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
* if we experiencing a lockup the value doesn't change
*/
if (seq == rdev->fence_drv[fence->ring].last_seq &&
radeon_gpu_is_lockup(rdev)) {
radeon_gpu_is_lockup(rdev, &rdev->cp)) {
/* good news we believe it's a lockup */
printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
......
@@ -160,8 +160,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
RADEON_IB_POOL_SIZE*64*1024;
args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
args->gart_size -= rdev->cp.ring_size;
return 0;
}
......
@@ -252,7 +252,8 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
mutex_lock(&rdev->cp.mutex);
if (rdev->cp.ring_obj)
mutex_lock(&rdev->cp.mutex);
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
@@ -268,12 +269,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
} else {
if (rdev->cp.ready) {
struct radeon_cp *cp = &rdev->cp;
if (cp->ready) {
struct radeon_fence *fence;
radeon_ring_alloc(rdev, 64);
radeon_ring_alloc(rdev, cp, 64);
radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
radeon_fence_emit(rdev, fence);
radeon_ring_commit(rdev);
radeon_ring_commit(rdev, cp);
radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
}
@@ -307,7 +309,8 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
mutex_unlock(&rdev->cp.mutex);
if (rdev->cp.ring_obj)
mutex_unlock(&rdev->cp.mutex);
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
......
@@ -60,17 +60,17 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
return idx_value;
}
void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
void radeon_ring_write(struct radeon_cp *cp, uint32_t v)
{
#if DRM_DEBUG_CODE
if (rdev->cp.count_dw <= 0) {
if (cp->count_dw <= 0) {
DRM_ERROR("radeon: writting more dword to ring than expected !\n");
}
#endif
rdev->cp.ring[rdev->cp.wptr++] = v;
rdev->cp.wptr &= rdev->cp.ptr_mask;
rdev->cp.count_dw--;
rdev->cp.ring_free_dw--;
cp->ring[cp->wptr++] = v;
cp->wptr &= cp->ptr_mask;
cp->count_dw--;
cp->ring_free_dw--;
}
void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
@@ -106,14 +106,14 @@ void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
/*
* IB.
*/
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib)
{
struct radeon_fence *fence;
struct radeon_ib *nib;
int r = 0, i, c;
*ib = NULL;
r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
r = radeon_fence_create(rdev, &fence, ring);
if (r) {
dev_err(rdev->dev, "failed to create fence for new IB\n");
return r;
@@ -178,16 +178,17 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_cp *cp = &rdev->cp;
int r = 0;
if (!ib->length_dw || !rdev->cp.ready) {
if (!ib->length_dw || !cp->ready) {
/* TODO: Nothings in the ib we should report. */
DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL;
}
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, 64);
r = radeon_ring_lock(rdev, cp, 64);
if (r) {
DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
return r;
@@ -198,7 +199,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
/* once scheduled IB is considered free and protected by the fence */
ib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_ring_unlock_commit(rdev);
radeon_ring_unlock_commit(rdev, cp);
return 0;
}
@@ -283,7 +284,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
/*
* Ring.
*/
void radeon_ring_free_size(struct radeon_device *rdev)
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp)
{
if (rdev->wb.enabled)
rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
@@ -294,122 +295,123 @@ void radeon_ring_free_size(struct radeon_device *rdev)
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
}
/* This works because ring_size is a power of 2 */
rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
rdev->cp.ring_free_dw -= rdev->cp.wptr;
rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
if (!rdev->cp.ring_free_dw) {
rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
cp->ring_free_dw = (cp->rptr + (cp->ring_size / 4));
cp->ring_free_dw -= cp->wptr;
cp->ring_free_dw &= cp->ptr_mask;
if (!cp->ring_free_dw) {
cp->ring_free_dw = cp->ring_size / 4;
}
}
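
The free-space computation in radeon_ring_free_size() above only works because ring_size is a power of two: ptr_mask = ring_size / 4 - 1 is then an all-ones mask, so the bias-and-mask sequence computes the distance from wptr back to rptr modulo the ring length. A worked example with hypothetical numbers, assuming a 16-dword ring (ring_size / 4 == 16, ptr_mask == 15):

/* Illustration only: assume rptr = 4, wptr = 12. */
ring_free_dw = 4 + 16;	/* rptr biased up by one ring length -> 20 */
ring_free_dw -= 12;	/* minus wptr -> 8 */
ring_free_dw &= 15;	/* wrap with ptr_mask -> 8 dwords free */
/* rptr == wptr would yield 0, which the code above treats as a
 * completely empty ring and resets to ring_size / 4. */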
int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
{
int r;
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
while (ndw > (rdev->cp.ring_free_dw - 1)) {
radeon_ring_free_size(rdev);
if (ndw < rdev->cp.ring_free_dw) {
ndw = (ndw + cp->align_mask) & ~cp->align_mask;
while (ndw > (cp->ring_free_dw - 1)) {
radeon_ring_free_size(rdev, cp);
if (ndw < cp->ring_free_dw) {
break;
}
r = radeon_fence_wait_next(rdev, RADEON_RING_TYPE_GFX_INDEX);
if (r)
return r;
}
rdev->cp.count_dw = ndw;
rdev->cp.wptr_old = rdev->cp.wptr;
cp->count_dw = ndw;
cp->wptr_old = cp->wptr;
return 0;
}
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
{
int r;
mutex_lock(&rdev->cp.mutex);
r = radeon_ring_alloc(rdev, ndw);
mutex_lock(&cp->mutex);
r = radeon_ring_alloc(rdev, cp, ndw);
if (r) {
mutex_unlock(&rdev->cp.mutex);
mutex_unlock(&cp->mutex);
return r;
}
return 0;
}
void radeon_ring_commit(struct radeon_device *rdev)
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_cp *cp)
{
unsigned count_dw_pad;
unsigned i;
/* We pad to match fetch size */
count_dw_pad = (rdev->cp.align_mask + 1) -
(rdev->cp.wptr & rdev->cp.align_mask);
count_dw_pad = (cp->align_mask + 1) -
(cp->wptr & cp->align_mask);
for (i = 0; i < count_dw_pad; i++) {
radeon_ring_write(rdev, 2 << 30);
radeon_ring_write(cp, 2 << 30);
}
DRM_MEMORYBARRIER();
radeon_cp_commit(rdev);
radeon_cp_commit(rdev, cp);
}
void radeon_ring_unlock_commit(struct radeon_device *rdev)
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_cp *cp)
{
radeon_ring_commit(rdev);
mutex_unlock(&rdev->cp.mutex);
radeon_ring_commit(rdev, cp);
mutex_unlock(&cp->mutex);
}
void radeon_ring_unlock_undo(struct radeon_device *rdev)
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_cp *cp)
{
rdev->cp.wptr = rdev->cp.wptr_old;
mutex_unlock(&rdev->cp.mutex);
cp->wptr = cp->wptr_old;
mutex_unlock(&cp->mutex);
}
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
int radeon_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size)
{
int r;
rdev->cp.ring_size = ring_size;
cp->ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
if (cp->ring_obj == NULL) {
r = radeon_bo_create(rdev, cp->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->cp.ring_obj);
&cp->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
r = radeon_bo_reserve(rdev->cp.ring_obj, false);
r = radeon_bo_reserve(cp->ring_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->cp.gpu_addr);
r = radeon_bo_pin(cp->ring_obj, RADEON_GEM_DOMAIN_GTT,
&cp->gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->cp.ring_obj);
radeon_bo_unreserve(cp->ring_obj);
dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
r = radeon_bo_kmap(rdev->cp.ring_obj,
(void **)&rdev->cp.ring);
radeon_bo_unreserve(rdev->cp.ring_obj);
r = radeon_bo_kmap(cp->ring_obj,
(void **)&cp->ring);
radeon_bo_unreserve(cp->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
cp->ptr_mask = (cp->ring_size / 4) - 1;
cp->ring_free_dw = cp->ring_size / 4;
return 0;
}
void radeon_ring_fini(struct radeon_device *rdev)
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_cp *cp)
{
int r;
struct radeon_bo *ring_obj;
mutex_lock(&rdev->cp.mutex);
ring_obj = rdev->cp.ring_obj;
rdev->cp.ring = NULL;
rdev->cp.ring_obj = NULL;
mutex_unlock(&rdev->cp.mutex);
mutex_lock(&cp->mutex);
ring_obj = cp->ring_obj;
cp->ring = NULL;
cp->ring_obj = NULL;
mutex_unlock(&cp->mutex);
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
@@ -422,7 +424,6 @@ void radeon_ring_fini(struct radeon_device *rdev)
}
}
/*
* Debugfs info
*/
......
@@ -121,13 +121,13 @@ int radeon_semaphore_create(struct radeon_device *rdev,
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
radeon_semaphore_ring_emit(rdev, semaphore, ring, false);
radeon_semaphore_ring_emit(rdev, &rdev->cp, semaphore, false);
}
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
radeon_semaphore_ring_emit(rdev, semaphore, ring, true);
radeon_semaphore_ring_emit(rdev, &rdev->cp, semaphore, true);
}
void radeon_semaphore_free(struct radeon_device *rdev,
......
@@ -42,7 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
n -= rdev->cp.ring_size;
if (rdev->wb.wb_obj)
n -= RADEON_GPU_PAGE_SIZE;
if (rdev->ih.ring_obj)
......
@@ -55,44 +55,45 @@ void rv515_debugfs(struct radeon_device *rdev)
void rv515_ring_start(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp;
int r;
r = radeon_ring_lock(rdev, 64);
r = radeon_ring_lock(rdev, cp, 64);
if (r) {
return;
}
radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
radeon_ring_write(rdev,
radeon_ring_write(cp, PACKET0(ISYNC_CNTL, 0));
radeon_ring_write(cp,
ISYNC_ANY2D_IDLE3D |
ISYNC_ANY3D_IDLE2D |
ISYNC_WAIT_IDLEGUI |
ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
radeon_ring_write(rdev,
radeon_ring_write(cp, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(cp, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(cp, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(cp, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(cp, PACKET0(GB_SELECT, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(GB_ENABLE, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(R500_SU_REG_DEST, 0));
radeon_ring_write(cp, (1 << rdev->num_gb_pipes) - 1);
radeon_ring_write(cp, PACKET0(VAP_INDEX_OFFSET, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(cp, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(cp, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(cp, ZC_FLUSH | ZC_FREE);
radeon_ring_write(cp, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(cp, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(cp, PACKET0(GB_AA_CONFIG, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(cp, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(cp, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(cp, ZC_FLUSH | ZC_FREE);
radeon_ring_write(cp, PACKET0(GB_MSPOS0, 0));
radeon_ring_write(cp,
((6 << MS_X0_SHIFT) |
(6 << MS_Y0_SHIFT) |
(6 << MS_X1_SHIFT) |
@@ -101,8 +102,8 @@ void rv515_ring_start(struct radeon_device *rdev)
(6 << MS_Y2_SHIFT) |
(6 << MSBD0_Y_SHIFT) |
(6 << MSBD0_X_SHIFT)));
radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
radeon_ring_write(rdev,
radeon_ring_write(cp, PACKET0(GB_MSPOS1, 0));
radeon_ring_write(cp,
((6 << MS_X3_SHIFT) |
(6 << MS_Y3_SHIFT) |
(6 << MS_X4_SHIFT) |
@@ -110,15 +111,15 @@ void rv515_ring_start(struct radeon_device *rdev)
(6 << MS_X5_SHIFT) |
(6 << MS_Y5_SHIFT) |
(6 << MSBD1_SHIFT)));
radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(rdev, PACKET0(0x20C8, 0));
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
radeon_ring_write(cp, PACKET0(GA_ENHANCE, 0));
radeon_ring_write(cp, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
radeon_ring_write(cp, PACKET0(GA_POLY_MODE, 0));
radeon_ring_write(cp, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
radeon_ring_write(cp, PACKET0(GA_ROUND_MODE, 0));
radeon_ring_write(cp, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(cp, PACKET0(0x20C8, 0));
radeon_ring_write(cp, 0);
radeon_ring_unlock_commit(rdev, cp);
}
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
......
@@ -357,7 +357,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
void r700_cp_fini(struct radeon_device *rdev)
{
r700_cp_stop(rdev);
radeon_ring_fini(rdev);
radeon_ring_fini(rdev, &rdev->cp);
}
/*
@@ -1043,6 +1043,7 @@ int rv770_mc_init(struct radeon_device *rdev)
static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp;
int r;
/* enable pcie gen2 link */
@@ -1091,7 +1092,7 @@ static int rv770_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
r = radeon_ring_init(rdev, cp, cp->ring_size);
if (r)
return r;
r = rv770_cp_load_microcode(rdev);
@@ -1121,7 +1122,7 @@ int rv770_resume(struct radeon_device *rdev)
return r;
}
r = r600_ib_test(rdev);
r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
@@ -1216,7 +1217,7 @@ int rv770_init(struct radeon_device *rdev)
return r;
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1242,7 +1243,7 @@ int rv770_init(struct radeon_device *rdev)
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
} else {
r = r600_ib_test(rdev);
r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
dev_err(rdev->dev, "IB test failed (%d).\n", r);
rdev->accel_working = false;
......