Commit 1b37078b authored by Alex Deucher, committed by Dave Airlie

drm/radeon/kms: add support for per-ring fence interrupts
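
Cayman parts have three CP rings, each with its own banked copy of
CP_INT_CNTL selected through SRBM_GFX_CNTL. Make the software interrupt
enable and its refcount per ring, program the CP interrupt enables for
all three rings on Cayman, and use the src_data of the CP EOP interrupt
to dispatch fence processing to the ring that signaled.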

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent b40e7e16
drivers/gpu/drm/radeon/evergreen.c
@@ -40,6 +40,8 @@
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+				     int ring, u32 cp_int_cntl);
 
 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 {
@@ -2474,7 +2476,13 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 {
 	u32 tmp;
 
-	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	if (rdev->family >= CHIP_CAYMAN) {
+		cayman_cp_int_cntl_setup(rdev, 0,
+					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+		cayman_cp_int_cntl_setup(rdev, 1, 0);
+		cayman_cp_int_cntl_setup(rdev, 2, 0);
+	} else
+		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2519,6 +2527,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 int evergreen_irq_set(struct radeon_device *rdev)
 {
 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
 	u32 grbm_int_cntl = 0;
@@ -2543,11 +2552,28 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
 	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
-	if (rdev->irq.sw_int) {
-		DRM_DEBUG("evergreen_irq_set: sw int\n");
-		cp_int_cntl |= RB_INT_ENABLE;
-		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+	if (rdev->family >= CHIP_CAYMAN) {
+		/* enable CP interrupts on all rings */
+		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+		}
+		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+		}
+		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+		}
+	} else {
+		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+			cp_int_cntl |= RB_INT_ENABLE;
+			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+		}
 	}
 	if (rdev->irq.crtc_vblank_int[0] ||
 	    rdev->irq.pflip[0]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2607,7 +2633,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
 	}
 
-	WREG32(CP_INT_CNTL, cp_int_cntl);
+	if (rdev->family >= CHIP_CAYMAN) {
+		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
+		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
+		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
+	} else
+		WREG32(CP_INT_CNTL, cp_int_cntl);
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3026,7 +3057,20 @@ int evergreen_irq_process(struct radeon_device *rdev)
 			break;
 		case 181: /* CP EOP event */
 			DRM_DEBUG("IH: CP EOP\n");
-			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			if (rdev->family >= CHIP_CAYMAN) {
+				switch (src_data) {
+				case 0:
+					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+					break;
+				case 1:
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+					break;
+				case 2:
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+					break;
+				}
+			} else
+				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
drivers/gpu/drm/radeon/ni.c
@@ -1006,6 +1006,15 @@ void cayman_pcie_gart_fini(struct radeon_device *rdev)
 	radeon_gart_fini(rdev);
 }
 
+void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+			      int ring, u32 cp_int_cntl)
+{
+	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+
+	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+	WREG32(CP_INT_CNTL, cp_int_cntl);
+}
+
 /*
  * CP.
  */
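
Note: the helper above works because the low bits of SRBM_GFX_CNTL (the
RINGID field defined in nid.h below) select which ring's banked copy of
the CP registers a following WREG32 lands in. A minimal sketch of how a
caller might drive it; example_enable_fence_irqs is a hypothetical
illustration, not part of the patch, and it assumes nothing else touches
SRBM_GFX_CNTL concurrently (the patch adds no locking around the bank
select):

/* Hypothetical example: program per-ring CP interrupt enables.
 * Assumes exclusive access to the SRBM_GFX_CNTL bank select. */
static void example_enable_fence_irqs(struct radeon_device *rdev)
{
	u32 enables[RADEON_NUM_RINGS] = {
		/* gfx ring additionally wants the context busy/empty irqs */
		CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE | TIME_STAMP_INT_ENABLE,
		TIME_STAMP_INT_ENABLE,	/* CP1: fence (EOP timestamp) irq only */
		TIME_STAMP_INT_ENABLE,	/* CP2: fence (EOP timestamp) irq only */
	};
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; i++)
		cayman_cp_int_cntl_setup(rdev, i, enables[i]);
}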
drivers/gpu/drm/radeon/nid.h
@@ -42,6 +42,9 @@
 #define CAYMAN_MAX_TCC_MASK		0xFF
 
 #define DMIF_ADDR_CONFIG		0xBD4
+#define SRBM_GFX_CNTL			0x0E44
+#define		RINGID(x)		(((x) & 0x3) << 0)
+#define		VMID(x)			(((x) & 0x7) << 0)
 #define SRBM_STATUS			0x0E50
 
 #define VM_CONTEXT0_REQUEST_RESPONSE	0x1470
@@ -394,6 +397,12 @@
 #define CP_RB0_RPTR_ADDR		0xC10C
 #define CP_RB0_RPTR_ADDR_HI		0xC110
 #define CP_RB0_WPTR			0xC114
+#define CP_INT_CNTL			0xC124
+#	define CNTX_BUSY_INT_ENABLE	(1 << 19)
+#	define CNTX_EMPTY_INT_ENABLE	(1 << 20)
+#	define TIME_STAMP_INT_ENABLE	(1 << 26)
 #define CP_RB1_BASE			0xC180
 #define CP_RB1_CNTL			0xC184
 #define CP_RB1_RPTR_ADDR		0xC188
drivers/gpu/drm/radeon/r100.c
@@ -667,7 +667,7 @@ int r100_irq_set(struct radeon_device *rdev)
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}
-	if (rdev->irq.sw_int) {
+	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
 		tmp |= RADEON_SW_INT_ENABLE;
 	}
 	if (rdev->irq.gui_idle) {
drivers/gpu/drm/radeon/r600.c
@@ -3098,7 +3098,7 @@ int r600_irq_set(struct radeon_device *rdev)
 		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
 	}
 
-	if (rdev->irq.sw_int) {
+	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
 		DRM_DEBUG("r600_irq_set: sw int\n");
 		cp_int_cntl |= RB_INT_ENABLE;
 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
drivers/gpu/drm/radeon/radeon.h
@@ -107,6 +107,17 @@ extern int radeon_msi;
 #define RADEONFB_CONN_LIMIT		4
 #define RADEON_BIOS_NUM_SCRATCH		8
 
+/* max number of rings */
+#define RADEON_NUM_RINGS 3
+
+/* internal ring indices */
+/* r1xx+ has gfx CP ring */
+#define RADEON_RING_TYPE_GFX_INDEX  0
+
+/* cayman has 2 compute CP rings */
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
+
 /*
  * Errata workarounds.
  */
@@ -464,7 +475,7 @@ union radeon_irq_stat_regs {
 
 struct radeon_irq {
 	bool		installed;
-	bool		sw_int;
+	bool		sw_int[RADEON_NUM_RINGS];
 	bool		crtc_vblank_int[RADEON_MAX_CRTCS];
 	bool		pflip[RADEON_MAX_CRTCS];
 	wait_queue_head_t	vblank_queue;
@@ -474,7 +485,7 @@ struct radeon_irq {
 	wait_queue_head_t	idle_queue;
 	bool		hdmi[RADEON_MAX_HDMI_BLOCKS];
 	spinlock_t sw_lock;
-	int sw_refcount;
+	int sw_refcount[RADEON_NUM_RINGS];
 	union radeon_irq_stat_regs stat_regs;
 	spinlock_t pflip_lock[RADEON_MAX_CRTCS];
 	int pflip_refcount[RADEON_MAX_CRTCS];
@@ -482,8 +493,8 @@ struct radeon_irq {
 
 int radeon_irq_kms_init(struct radeon_device *rdev);
 void radeon_irq_kms_fini(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
 void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
 void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
@@ -491,17 +502,6 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
  * CP & rings.
  */
 
-/* max number of rings */
-#define RADEON_NUM_RINGS 3
-
-/* internal ring indices */
-/* r1xx+ has gfx CP ring */
-#define RADEON_RING_TYPE_GFX_INDEX  0
-
-/* cayman has 2 compute CP rings */
-#define CAYMAN_RING_TYPE_CP1_INDEX 1
-#define CAYMAN_RING_TYPE_CP2_INDEX 2
-
 struct radeon_ib {
 	struct list_head	list;
 	unsigned		idx;
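
Note: the ring count and ring index defines are not new; they move from
the CP section of radeon.h (removed in the hunk above) up before struct
radeon_irq (added in the first radeon.h hunk) so that sw_int[] and
sw_refcount[] can be sized with RADEON_NUM_RINGS.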
drivers/gpu/drm/radeon/radeon_fence.c
@@ -230,18 +230,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 		seq = rdev->fence_drv[fence->ring].last_seq;
 		trace_radeon_fence_wait_begin(rdev->ddev, seq);
 		if (intr) {
-			radeon_irq_kms_sw_irq_get(rdev);
+			radeon_irq_kms_sw_irq_get(rdev, fence->ring);
 			r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
 					radeon_fence_signaled(fence), timeout);
-			radeon_irq_kms_sw_irq_put(rdev);
+			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
 			if (unlikely(r < 0)) {
 				return r;
 			}
 		} else {
-			radeon_irq_kms_sw_irq_get(rdev);
+			radeon_irq_kms_sw_irq_get(rdev, fence->ring);
 			r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
 					radeon_fence_signaled(fence), timeout);
-			radeon_irq_kms_sw_irq_put(rdev);
+			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
 		}
 		trace_radeon_fence_wait_end(rdev->ddev, seq);
 		if (unlikely(!radeon_fence_signaled(fence))) {
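
With the ring carried through, a fence wait now only holds the software
interrupt enable for the ring the fence was emitted on. A compact sketch
of the waiter pattern (example_wait is a hypothetical helper and the
timeout value is illustrative, not from the patch):

/* Hypothetical example of the per-ring waiter pattern. */
static int example_wait(struct radeon_device *rdev, struct radeon_fence *fence)
{
	long r;

	radeon_irq_kms_sw_irq_get(rdev, fence->ring);	/* this ring's refcount++ */
	r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
			       radeon_fence_signaled(fence),
			       msecs_to_jiffies(500));	/* illustrative timeout */
	radeon_irq_kms_sw_irq_put(rdev, fence->ring);	/* this ring's refcount-- */
	return r ? 0 : -ETIMEDOUT;
}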
drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -65,7 +65,8 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
 	unsigned i;
 
 	/* Disable *all* interrupts */
-	rdev->irq.sw_int = false;
+	for (i = 0; i < RADEON_NUM_RINGS; i++)
+		rdev->irq.sw_int[i] = false;
 	rdev->irq.gui_idle = false;
 	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
 		rdev->irq.hpd[i] = false;
@@ -81,9 +82,11 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
+	unsigned i;
 
 	dev->max_vblank_count = 0x001fffff;
-	rdev->irq.sw_int = true;
+	for (i = 0; i < RADEON_NUM_RINGS; i++)
+		rdev->irq.sw_int[i] = true;
 	radeon_irq_set(rdev);
 	return 0;
 }
@@ -97,7 +100,8 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
 		return;
 	}
 	/* Disable *all* interrupts */
-	rdev->irq.sw_int = false;
+	for (i = 0; i < RADEON_NUM_RINGS; i++)
+		rdev->irq.sw_int[i] = false;
 	rdev->irq.gui_idle = false;
 	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
 		rdev->irq.hpd[i] = false;
@@ -194,26 +198,26 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
 		flush_work_sync(&rdev->hotplug_work);
 }
 
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
 {
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
-	if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
-		rdev->irq.sw_int = true;
+	if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
+		rdev->irq.sw_int[ring] = true;
 		radeon_irq_set(rdev);
 	}
 	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
 }
 
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
 {
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
-	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
-	if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
-		rdev->irq.sw_int = false;
+	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
+	if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
+		rdev->irq.sw_int[ring] = false;
 		radeon_irq_set(rdev);
 	}
 	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
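
Note: with the refcount per ring, waiters on different rings no longer
gate each other's interrupt enables. A hypothetical call sequence:

radeon_irq_kms_sw_irq_get(rdev, RADEON_RING_TYPE_GFX_INDEX);	/* gfx: 0 -> 1, gfx irq on */
radeon_irq_kms_sw_irq_get(rdev, CAYMAN_RING_TYPE_CP1_INDEX);	/* cp1: 0 -> 1, cp1 irq on */
radeon_irq_kms_sw_irq_put(rdev, RADEON_RING_TYPE_GFX_INDEX);	/* gfx: 1 -> 0, gfx irq off; cp1 unaffected */
radeon_irq_kms_sw_irq_put(rdev, CAYMAN_RING_TYPE_CP1_INDEX);	/* cp1: 1 -> 0, cp1 irq off */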
drivers/gpu/drm/radeon/rs600.c
@@ -549,7 +549,7 @@ int rs600_irq_set(struct radeon_device *rdev)
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}
-	if (rdev->irq.sw_int) {
+	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
 		tmp |= S_000040_SW_INT_EN(1);
 	}
 	if (rdev->irq.gui_idle) {