Commit cdbe8b54 authored by Dave Airlie

Merge tag 'drm-intel-next-2012-02-07' of git://people.freedesktop.org/~danvet/drm-intel into drm-core-next

* tag 'drm-intel-next-2012-02-07' of git://people.freedesktop.org/~danvet/drm-intel: (29 commits)
  drm/i915: Handle unmappable buffers during error state capture
  drm/i915: rewrite shmem_pread_slow to use copy_to_user
  drm/i915: rewrite shmem_pwrite_slow to use copy_from_user
  drm/i915: fall through pwrite_gtt_slow to the shmem slow path
  drm/i915: add debugfs file for swizzling information
  drm/i915: fix swizzle detection for gen3
  drm/i915: Remove the upper limit on the bo size for mapping into the CPU domain
  drm/i915: add per-ring fault reg to error_state
  drm/i915: reject GTT domain in relocations
  drm/i915: remove the i915_batchbuffer_info debugfs file
  drm/i915: capture error_state also for stuck rings
  drm/i915: refactor debugfs create functions
  drm/i915: refactor debugfs open function
  drm/i915: don't trash the gtt when running out of fences
  drm/i915: Separate fence pin counting from normal bind pin counting
  drm/i915/ringbuffer: kill snb blt workaround
  drm/i915: collect more per ring error state
  drm/i915: refactor ring error state capture to use arrays
  drm/i915: switch ring->id to be a real id
  drm/i915: set AUD_CONFIG N_value_index for DisplayPort
  ...
parents 285484e2 172975aa
@@ -2132,7 +2132,7 @@ int i915_driver_unload(struct drm_device *dev)
 	unregister_shrinker(&dev_priv->mm.inactive_shrinker);

 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gpu_idle(dev);
+	ret = i915_gpu_idle(dev, true);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
 	mutex_unlock(&dev->struct_mutex);
...
@@ -135,6 +135,7 @@ struct drm_i915_fence_reg {
 	struct list_head lru_list;
 	struct drm_i915_gem_object *obj;
 	uint32_t setup_seqno;
+	int pin_count;
 };

 struct sdvo_device_mapping {
@@ -152,26 +153,21 @@ struct drm_i915_error_state {
 	u32 eir;
 	u32 pgtbl_er;
 	u32 pipestat[I915_MAX_PIPES];
-	u32 ipeir;
-	u32 ipehr;
-	u32 instdone;
-	u32 acthd;
+	u32 tail[I915_NUM_RINGS];
+	u32 head[I915_NUM_RINGS];
+	u32 ipeir[I915_NUM_RINGS];
+	u32 ipehr[I915_NUM_RINGS];
+	u32 instdone[I915_NUM_RINGS];
+	u32 acthd[I915_NUM_RINGS];
 	u32 error; /* gen6+ */
-	u32 bcs_acthd; /* gen6+ blt engine */
-	u32 bcs_ipehr;
-	u32 bcs_ipeir;
-	u32 bcs_instdone;
-	u32 bcs_seqno;
-	u32 vcs_acthd; /* gen6+ bsd engine */
-	u32 vcs_ipehr;
-	u32 vcs_ipeir;
-	u32 vcs_instdone;
-	u32 vcs_seqno;
-	u32 instpm;
-	u32 instps;
+	u32 instpm[I915_NUM_RINGS];
+	u32 instps[I915_NUM_RINGS];
 	u32 instdone1;
-	u32 seqno;
+	u32 seqno[I915_NUM_RINGS];
 	u64 bbaddr;
+	u32 fault_reg[I915_NUM_RINGS];
+	u32 done_reg;
+	u32 faddr[I915_NUM_RINGS];
 	u64 fence[I915_MAX_NUM_FENCES];
 	struct timeval time;
 	struct drm_i915_error_object {
@@ -1170,6 +1166,24 @@ int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 					   struct intel_ring_buffer *pipelined);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

+static inline void
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		dev_priv->fence_regs[obj->fence_reg].pin_count++;
+	}
+}
+
+static inline void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		dev_priv->fence_regs[obj->fence_reg].pin_count--;
+	}
+}
+
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@ -1183,13 +1197,14 @@ void i915_gem_do_init(struct drm_device *dev,
 		      unsigned long start,
 		      unsigned long mappable_end,
 		      unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
 				  struct drm_file *file,
 				  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-				   uint32_t seqno);
+				   uint32_t seqno,
+				   bool do_retire);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
...
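Editorial note on the fence pin-count API above: pin_count lets the fence allocator skip registers whose fences are in active use, so a fence cannot be stolen out from under the hardware. A minimal sketch of the intended pairing, with a hypothetical caller (the function name and the NULL pipelined argument are illustrative, not from this diff):

	/* Hypothetical caller, for illustration only: hold the fence across
	 * a span where the hardware must keep using it, then drop the pin
	 * so the register becomes eligible for reuse again.
	 */
	static int example_hold_fence(struct drm_i915_gem_object *obj)
	{
		int ret;

		ret = i915_gem_object_get_fence(obj, NULL); /* may allocate or steal */
		if (ret)
			return ret;

		i915_gem_object_pin_fence(obj);   /* pin_count++: not stealable */

		/* ... hardware access that relies on the fence ... */

		i915_gem_object_unpin_fence(obj); /* pin_count--: stealable again */
		return 0;
	}

intel_pin_and_fence_fb_obj()/intel_unpin_fb_obj() later in this merge follow this pattern for scanout buffers.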
@@ -195,7 +195,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 	trace_i915_gem_evict_everything(dev, purgeable_only);

 	/* Flush everything (on to the inactive lists) and evict */
-	ret = i915_gpu_idle(dev);
+	ret = i915_gpu_idle(dev, true);
 	if (ret)
 		return ret;
...
@@ -203,9 +203,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
 	cd->invalidate_domains |= invalidate_domains;
 	cd->flush_domains |= flush_domains;
 	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= obj->ring->id;
+		cd->flush_rings |= intel_ring_flag(obj->ring);
 	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= ring->id;
+		cd->flush_rings |= intel_ring_flag(ring);
 }

 struct eb_objects {
@@ -303,8 +303,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 			  reloc->write_domain);
 		return ret;
 	}
-	if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
-		DRM_ERROR("reloc with read/write CPU domains: "
+	if (unlikely((reloc->write_domain | reloc->read_domains)
+		     & ~I915_GEM_GPU_DOMAINS)) {
+		DRM_ERROR("reloc with read/write non-GPU domains: "
 			  "obj %p target %d offset %d "
 			  "read %08x write %08x",
 			  obj, reloc->target_handle,
@@ -461,6 +462,54 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 	return ret;
 }

+#define  __EXEC_OBJECT_HAS_FENCE (1<<31)
+
+static int
+pin_and_fence_object(struct drm_i915_gem_object *obj,
+		     struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	bool need_fence, need_mappable;
+	int ret;
+
+	need_fence =
+		has_fenced_gpu_access &&
+		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		obj->tiling_mode != I915_TILING_NONE;
+	need_mappable =
+		entry->relocation_count ? true : need_fence;
+
+	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+	if (ret)
+		return ret;
+
+	if (has_fenced_gpu_access) {
+		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+			if (obj->tiling_mode) {
+				ret = i915_gem_object_get_fence(obj, ring);
+				if (ret)
+					goto err_unpin;
+
+				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+				i915_gem_object_pin_fence(obj);
+			} else {
+				ret = i915_gem_object_put_fence(obj);
+				if (ret)
+					goto err_unpin;
+			}
+		}
+		obj->pending_fenced_gpu_access = need_fence;
+	}
+
+	entry->offset = obj->gtt_offset;
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+	return ret;
+}
+
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct drm_file *file,
@@ -518,6 +567,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 	list_for_each_entry(obj, objects, exec_list) {
 		struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 		bool need_fence, need_mappable;
+
 		if (!obj->gtt_space)
 			continue;
@@ -532,58 +582,47 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    (need_mappable && !obj->map_and_fenceable))
 				ret = i915_gem_object_unbind(obj);
 			else
-				ret = i915_gem_object_pin(obj,
-							  entry->alignment,
-							  need_mappable);
+				ret = pin_and_fence_object(obj, ring);
 			if (ret)
 				goto err;
-
-			entry++;
 		}

 		/* Bind fresh objects */
 		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-			bool need_fence;
+			if (obj->gtt_space)
+				continue;

-			need_fence =
-				has_fenced_gpu_access &&
-				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-				obj->tiling_mode != I915_TILING_NONE;
+			ret = pin_and_fence_object(obj, ring);
+			if (ret) {
+				int ret_ignore;

-			if (!obj->gtt_space) {
-				bool need_mappable =
-					entry->relocation_count ? true : need_fence;
-
-				ret = i915_gem_object_pin(obj,
-							  entry->alignment,
-							  need_mappable);
-				if (ret)
-					break;
-			}
-
-			if (has_fenced_gpu_access) {
-				if (need_fence) {
-					ret = i915_gem_object_get_fence(obj, ring);
-					if (ret)
-						break;
-				} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-					   obj->tiling_mode == I915_TILING_NONE) {
-					/* XXX pipelined! */
-					ret = i915_gem_object_put_fence(obj);
-					if (ret)
-						break;
-				}
-				obj->pending_fenced_gpu_access = need_fence;
-			}
-
-			entry->offset = obj->gtt_offset;
+				/* This can potentially raise a harmless
+				 * -EINVAL if we failed to bind in the above
+				 * call. It cannot raise -EINTR since we know
+				 * that the bo is freshly bound and so will
+				 * not need to be flushed or waited upon.
+				 */
+				ret_ignore = i915_gem_object_unbind(obj);
+				(void)ret_ignore;
+				WARN_ON(obj->gtt_space);
+				break;
+			}
 		}

 		/* Decrement pin count for bound objects */
 		list_for_each_entry(obj, objects, exec_list) {
-			if (obj->gtt_space)
-				i915_gem_object_unpin(obj);
+			struct drm_i915_gem_exec_object2 *entry;
+
+			if (!obj->gtt_space)
+				continue;
+
+			entry = obj->exec_entry;
+			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+				i915_gem_object_unpin_fence(obj);
+				entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+			}
+
+			i915_gem_object_unpin(obj);
 		}

 		if (ret != -ENOSPC || retry > 1)
@@ -600,16 +639,19 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 	} while (1);

 err:
-	obj = list_entry(obj->exec_list.prev,
-			 struct drm_i915_gem_object,
-			 exec_list);
-	while (objects != &obj->exec_list) {
-		if (obj->gtt_space)
-			i915_gem_object_unpin(obj);
+	list_for_each_entry_continue_reverse(obj, objects, exec_list) {
+		struct drm_i915_gem_exec_object2 *entry;

-		obj = list_entry(obj->exec_list.prev,
-				 struct drm_i915_gem_object,
-				 exec_list);
+		if (!obj->gtt_space)
+			continue;
+
+		entry = obj->exec_entry;
+		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+			i915_gem_object_unpin_fence(obj);
+			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+		}
+
+		i915_gem_object_unpin(obj);
 	}

 	return ret;
@@ -1186,7 +1228,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * so every billion or so execbuffers, we need to stall
 	 * the GPU in order to reset the counters.
 	 */
-	ret = i915_gpu_idle(dev);
+	ret = i915_gpu_idle(dev, true);
 	if (ret)
 		goto err;
...
@@ -55,7 +55,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
 		dev_priv->mm.interruptible = false;
-		if (i915_gpu_idle(dev_priv->dev)) {
+		if (i915_gpu_idle(dev_priv->dev, false)) {
 			DRM_ERROR("Couldn't idle GPU\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
...
@@ -107,10 +107,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (IS_MOBILE(dev)) {
+	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
 		uint32_t dcc;

-		/* On mobile 9xx chipsets, channel interleave by the CPU is
+		/* On 9xx chipsets, channel interleave by the CPU is
 		 * determined by DCC.  For single-channel, neither the CPU
 		 * nor the GPU do swizzling.  For dual channel interleaved,
 		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
...
@@ -720,7 +720,6 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 	reloc_offset = src->gtt_offset;
 	for (page = 0; page < page_count; page++) {
 		unsigned long flags;
-		void __iomem *s;
 		void *d;

 		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
@@ -728,10 +727,29 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 			goto unwind;

 		local_irq_save(flags);
-		s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-					     reloc_offset);
-		memcpy_fromio(d, s, PAGE_SIZE);
-		io_mapping_unmap_atomic(s);
+		if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+			void __iomem *s;
+
+			/* Simply ignore tiling or any overlapping fence.
+			 * It's part of the error state, and this hopefully
+			 * captures what the GPU read.
+			 */
+
+			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+						     reloc_offset);
+			memcpy_fromio(d, s, PAGE_SIZE);
+			io_mapping_unmap_atomic(s);
+		} else {
+			void *s;
+
+			drm_clflush_pages(&src->pages[page], 1);
+
+			s = kmap_atomic(src->pages[page]);
+			memcpy(d, s, PAGE_SIZE);
+			kunmap_atomic(s);
+
+			drm_clflush_pages(&src->pages[page], 1);
+		}
 		local_irq_restore(flags);

 		dst->pages[page] = d;
@@ -804,7 +822,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
 		err->tiling = obj->tiling_mode;
 		err->dirty = obj->dirty;
 		err->purgeable = obj->madv != I915_MADV_WILLNEED;
-		err->ring = obj->ring ? obj->ring->id : 0;
+		err->ring = obj->ring ? obj->ring->id : -1;
 		err->cache_level = obj->cache_level;

 		if (++i == count)
@@ -876,6 +894,39 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
 	return NULL;
 }

+static void i915_record_ring_state(struct drm_device *dev,
+				   struct drm_i915_error_state *error,
+				   struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+	}
+	if (INTEL_INFO(dev)->gen >= 4) {
+		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+		if (ring->id == RCS) {
+			error->instdone1 = I915_READ(INSTDONE1);
+			error->bbaddr = I915_READ64(BB_ADDR);
+		}
+	} else {
+		error->ipeir[ring->id] = I915_READ(IPEIR);
+		error->ipehr[ring->id] = I915_READ(IPEHR);
+		error->instdone[ring->id] = I915_READ(INSTDONE);
+	}
+
+	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+	error->seqno[ring->id] = ring->get_seqno(ring);
+	error->acthd[ring->id] = intel_ring_get_active_head(ring);
+	error->head[ring->id] = I915_READ_HEAD(ring);
+	error->tail[ring->id] = I915_READ_TAIL(ring);
+}
+
 /**
  * i915_capture_error_state - capture an error record for later analysis
  * @dev: drm device
@@ -900,7 +951,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 		return;

 	/* Account for pipe specific data like PIPE*STAT */
-	error = kmalloc(sizeof(*error), GFP_ATOMIC);
+	error = kzalloc(sizeof(*error), GFP_ATOMIC);
 	if (!error) {
 		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
 		return;
@@ -909,47 +960,22 @@ static void i915_capture_error_state(struct drm_device *dev)
 	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
 		 dev->primary->index);

-	error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	for_each_pipe(pipe)
 		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-	error->instpm = I915_READ(INSTPM);
-	error->error = 0;
+
 	if (INTEL_INFO(dev)->gen >= 6) {
 		error->error = I915_READ(ERROR_GEN6);
-
-		error->bcs_acthd = I915_READ(BCS_ACTHD);
-		error->bcs_ipehr = I915_READ(BCS_IPEHR);
-		error->bcs_ipeir = I915_READ(BCS_IPEIR);
-		error->bcs_instdone = I915_READ(BCS_INSTDONE);
-		error->bcs_seqno = 0;
-		if (dev_priv->ring[BCS].get_seqno)
-			error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
-
-		error->vcs_acthd = I915_READ(VCS_ACTHD);
-		error->vcs_ipehr = I915_READ(VCS_IPEHR);
-		error->vcs_ipeir = I915_READ(VCS_IPEIR);
-		error->vcs_instdone = I915_READ(VCS_INSTDONE);
-		error->vcs_seqno = 0;
-		if (dev_priv->ring[VCS].get_seqno)
-			error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
-	}
-	if (INTEL_INFO(dev)->gen >= 4) {
-		error->ipeir = I915_READ(IPEIR_I965);
-		error->ipehr = I915_READ(IPEHR_I965);
-		error->instdone = I915_READ(INSTDONE_I965);
-		error->instps = I915_READ(INSTPS);
-		error->instdone1 = I915_READ(INSTDONE1);
-		error->acthd = I915_READ(ACTHD_I965);
-		error->bbaddr = I915_READ64(BB_ADDR);
-	} else {
-		error->ipeir = I915_READ(IPEIR);
-		error->ipehr = I915_READ(IPEHR);
-		error->instdone = I915_READ(INSTDONE);
-		error->acthd = I915_READ(ACTHD);
-		error->bbaddr = 0;
+		error->done_reg = I915_READ(DONE_REG);
 	}

+	i915_record_ring_state(dev, error, &dev_priv->ring[RCS]);
+	if (HAS_BLT(dev))
+		i915_record_ring_state(dev, error, &dev_priv->ring[BCS]);
+	if (HAS_BSD(dev))
+		i915_record_ring_state(dev, error, &dev_priv->ring[VCS]);
+
 	i915_gem_record_fences(dev, error);
@@ -1017,11 +1043,12 @@ void i915_destroy_error_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error;
+	unsigned long flags;

-	spin_lock(&dev_priv->error_lock);
+	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	error = dev_priv->first_error;
 	dev_priv->first_error = NULL;
-	spin_unlock(&dev_priv->error_lock);
+	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

 	if (error)
 		i915_error_state_free(dev, error);
@@ -1698,6 +1725,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 	    dev_priv->last_instdone1 == instdone1) {
 		if (dev_priv->hangcheck_count++ > 1) {
 			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+			i915_handle_error(dev, true);

 			if (!IS_GEN2(dev)) {
 				/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -1705,7 +1733,6 @@ void i915_hangcheck_elapsed(unsigned long data)
 				 * and break the hang. This should work on
 				 * all but the second generation chipsets.
 				 */
-
 				if (kick_ring(&dev_priv->ring[RCS]))
 					goto repeat;
@@ -1718,7 +1745,6 @@ void i915_hangcheck_elapsed(unsigned long data)
 					goto repeat;
 			}

-			i915_handle_error(dev, true);
 			return;
 		}
 	} else {
...
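With the error state fields turned into per-ring arrays, a consumer can iterate engines instead of special-casing the render, bsd, and blt fields. A minimal sketch of such a consumer, assuming only the RCS/VCS/BCS ids and the array layout introduced above (the helper itself is hypothetical, not part of this merge):

	/* Hypothetical dump helper: walks the per-ring arrays that replaced
	 * the separate render/bsd/blt fields of drm_i915_error_state.
	 */
	static void example_dump_ring_state(struct drm_i915_error_state *error)
	{
		static const char * const name[I915_NUM_RINGS] = {
			[RCS] = "render", [VCS] = "bsd", [BCS] = "blt",
		};
		int i;

		for (i = 0; i < I915_NUM_RINGS; i++)
			printk(KERN_INFO "%s: HEAD %08x TAIL %08x ACTHD %08x seqno %u\n",
			       name[i], error->head[i], error->tail[i],
			       error->acthd[i], error->seqno[i]);
	}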
@@ -319,6 +319,8 @@
 #define RING_HWS_PGA(base)	((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
 #define RENDER_HWS_PGA_GEN7	(0x04080)
+#define RING_FAULT_REG(ring)	(0x4094 + 0x100*(ring)->id)
+#define DONE_REG		0x40b0
 #define BSD_HWS_PGA_GEN7	(0x04180)
 #define BLT_HWS_PGA_GEN7	(0x04280)
 #define RING_ACTHD(base)	((base)+0x74)
@@ -352,6 +354,12 @@
 #define IPEIR_I965	0x02064
 #define IPEHR_I965	0x02068
 #define INSTDONE_I965	0x0206c
+#define RING_IPEIR(base)	((base)+0x64)
+#define RING_IPEHR(base)	((base)+0x68)
+#define RING_INSTDONE(base)	((base)+0x6c)
+#define RING_INSTPS(base)	((base)+0x70)
+#define RING_DMA_FADD(base)	((base)+0x78)
+#define RING_INSTPM(base)	((base)+0xc0)
 #define INSTPS		0x02070 /* 965+ only */
 #define INSTDONE1	0x0207c /* 965+ only */
 #define ACTHD_I965	0x02074
@@ -365,14 +373,6 @@
 #define INSTDONE	0x02090
 #define NOPID		0x02094
 #define HWSTAM		0x02098
-#define VCS_INSTDONE	0x1206C
-#define VCS_IPEIR	0x12064
-#define VCS_IPEHR	0x12068
-#define VCS_ACTHD	0x12074
-#define BCS_INSTDONE	0x2206C
-#define BCS_IPEIR	0x22064
-#define BCS_IPEHR	0x22068
-#define BCS_ACTHD	0x22074

 #define ERROR_GEN6	0x040a0
@@ -391,7 +391,7 @@
 #define MI_MODE		0x0209c
 # define VS_TIMER_DISPATCH	(1 << 6)
-# define MI_FLUSH_ENABLE	(1 << 11)
+# define MI_FLUSH_ENABLE	(1 << 12)

 #define GFX_MODE	0x02520
 #define GFX_MODE_GEN7	0x0229c
@@ -3742,4 +3742,16 @@
  */
 #define GEN7_SO_WRITE_OFFSET(n)	(0x5280 + (n) * 4)

+#define IBX_AUD_CONFIG_A		0xe2000
+#define CPT_AUD_CONFIG_A		0xe5000
+#define   AUD_CONFIG_N_VALUE_INDEX	(1 << 29)
+#define   AUD_CONFIG_N_PROG_ENABLE	(1 << 28)
+#define   AUD_CONFIG_UPPER_N_SHIFT	20
+#define   AUD_CONFIG_UPPER_N_VALUE	(0xff << 20)
+#define   AUD_CONFIG_LOWER_N_SHIFT	4
+#define   AUD_CONFIG_LOWER_N_VALUE	(0xfff << 4)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT	16
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI	(0xf << 16)
+#define   AUD_CONFIG_DISABLE_NCTS	(1 << 3)
+
 #endif /* _I915_REG_H_ */
...
@@ -936,6 +936,10 @@ void assert_pipe(struct drm_i915_private *dev_priv,
 	u32 val;
 	bool cur_state;

+	/* if we need the pipe A quirk it must be always on */
+	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+		state = true;
+
 	reg = PIPECONF(pipe);
 	val = I915_READ(reg);
 	cur_state = !!(val & PIPECONF_ENABLE);
@@ -2037,6 +2041,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 		ret = i915_gem_object_get_fence(obj, pipelined);
 		if (ret)
 			goto err_unpin;
+
+		i915_gem_object_pin_fence(obj);
 	}

 	dev_priv->mm.interruptible = true;
@@ -2049,6 +2055,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	return ret;
 }

+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin_fence(obj);
+	i915_gem_object_unpin(obj);
+}
+
 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			     int x, int y)
 {
@@ -2280,7 +2292,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
 					 LEAVE_ATOMIC_MODE_SET);
 	if (ret) {
-		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
 		mutex_unlock(&dev->struct_mutex);
 		DRM_ERROR("failed to update base address\n");
 		return ret;
@@ -2288,7 +2300,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	if (old_fb) {
 		intel_wait_for_vblank(dev, intel_crtc->pipe);
-		i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
 	}

 	mutex_unlock(&dev->struct_mutex);
@@ -3351,7 +3363,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->fb) {
 		mutex_lock(&dev->struct_mutex);
-		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
 		mutex_unlock(&dev->struct_mutex);
 	}
 }
@@ -4548,6 +4560,7 @@ void sandybridge_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
+	u32 val;
 	int fbc_wm, plane_wm, cursor_wm;
 	unsigned int enabled;
@@ -4556,8 +4569,10 @@ void sandybridge_update_wm(struct drm_device *dev)
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
-		I915_WRITE(WM0_PIPEA_ILK,
-			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		val = I915_READ(WM0_PIPEA_ILK);
+		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+		I915_WRITE(WM0_PIPEA_ILK, val |
+			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 			      " plane %d, " "cursor: %d\n",
 			      plane_wm, cursor_wm);
@@ -4568,8 +4583,10 @@ void sandybridge_update_wm(struct drm_device *dev)
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
-		I915_WRITE(WM0_PIPEB_ILK,
-			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		val = I915_READ(WM0_PIPEB_ILK);
+		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+		I915_WRITE(WM0_PIPEB_ILK, val |
+			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
@@ -4582,8 +4599,10 @@ void sandybridge_update_wm(struct drm_device *dev)
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
-		I915_WRITE(WM0_PIPEC_IVB,
-			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		val = I915_READ(WM0_PIPEC_IVB);
+		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+		I915_WRITE(WM0_PIPEC_IVB, val |
+			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
 		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
@@ -4727,6 +4746,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
+	u32 val;
 	int sprite_wm, reg;
 	int ret;
@@ -4753,7 +4773,9 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
 		return;
 	}

-	I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+	val = I915_READ(reg);
+	val &= ~WM0_PIPE_SPRITE_MASK;
+	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
 	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
@@ -6130,15 +6152,18 @@ static void ironlake_write_eld(struct drm_connector *connector,
 	uint32_t i;
 	int len;
 	int hdmiw_hdmiedid;
+	int aud_config;
 	int aud_cntl_st;
 	int aud_cntrl_st2;

 	if (HAS_PCH_IBX(connector->dev)) {
 		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
+		aud_config = IBX_AUD_CONFIG_A;
 		aud_cntl_st = IBX_AUD_CNTL_ST_A;
 		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
 	} else {
 		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
+		aud_config = CPT_AUD_CONFIG_A;
 		aud_cntl_st = CPT_AUD_CNTL_ST_A;
 		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
 	}
@@ -6146,6 +6171,7 @@ static void ironlake_write_eld(struct drm_connector *connector,
 	i = to_intel_crtc(crtc)->pipe;
 	hdmiw_hdmiedid += i * 0x100;
 	aud_cntl_st += i * 0x100;
+	aud_config += i * 0x100;

 	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
@@ -6165,7 +6191,9 @@ static void ironlake_write_eld(struct drm_connector *connector,
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
 		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
 		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
-	}
+		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+	} else
+		I915_WRITE(aud_config, 0);

 	if (intel_eld_uptodate(connector,
 			       aud_cntrl_st2, eldv,
@@ -7141,7 +7169,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 		container_of(__work, struct intel_unpin_work, work);

 	mutex_lock(&work->dev->struct_mutex);
-	i915_gem_object_unpin(work->old_fb_obj);
+	intel_unpin_fb_obj(work->old_fb_obj);
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
@@ -7291,7 +7319,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	OUT_RING(fb->pitches[0]);
 	OUT_RING(obj->gtt_offset + offset);
-	OUT_RING(MI_NOOP);
+	OUT_RING(0);	/* aux display base address, unused */
 	ADVANCE_LP_RING();
 out:
 	return ret;
@@ -7883,7 +7911,8 @@ int intel_framebuffer_init(struct drm_device *dev,
 	case DRM_FORMAT_VYUY:
 		break;
 	default:
-		DRM_ERROR("unsupported pixel format\n");
+		DRM_DEBUG_KMS("unsupported pixel format %u\n",
+			      mode_cmd->pixel_format);
 		return -EINVAL;
 	}
...
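The new intel_unpin_fb_obj() above is the counterpart to intel_pin_and_fence_fb_obj(): every site that used to call i915_gem_object_unpin() directly on a scanout buffer must now drop the fence pin as well. The pairing in a sketch (error handling trimmed; note the fence is only actually pinned for tiled objects, per the hunk above):

	/* Sketch of the symmetric pin/unpin protocol for scanout objects. */
	ret = intel_pin_and_fence_fb_obj(dev, obj, pipelined);
	if (ret)
		return ret;

	/* ... display hardware scans out of obj ... */

	intel_unpin_fb_obj(obj);	/* unpins the fence, then the object */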
@@ -374,6 +374,7 @@ extern void intel_init_emon(struct drm_device *dev);
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 				      struct drm_i915_gem_object *obj,
 				      struct intel_ring_buffer *pipelined);
+extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);

 extern int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *ifb,
...
@@ -227,7 +227,8 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	}
 	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+				true);
 	if (ret)
 		return ret;
@@ -448,7 +449,8 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 	if (overlay->last_flip_req == 0)
 		return 0;

-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+				true);
 	if (ret)
 		return ret;
...
@@ -399,8 +399,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	if (INTEL_INFO(dev)->gen > 3) {
 		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
-		if (IS_GEN6(dev) || IS_GEN7(dev))
-			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
 		I915_WRITE(MI_MODE, mode);
 		if (IS_GEN7(dev))
 			I915_WRITE(GFX_MODE_GEN7,
@@ -744,13 +742,13 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 	 */
 	if (IS_GEN7(dev)) {
 		switch (ring->id) {
-		case RING_RENDER:
+		case RCS:
 			mmio = RENDER_HWS_PGA_GEN7;
 			break;
-		case RING_BLT:
+		case BCS:
 			mmio = BLT_HWS_PGA_GEN7;
 			break;
-		case RING_BSD:
+		case VCS:
 			mmio = BSD_HWS_PGA_GEN7;
 			break;
 		}
@@ -1212,7 +1210,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
 static const struct intel_ring_buffer render_ring = {
 	.name			= "render ring",
-	.id			= RING_RENDER,
+	.id			= RCS,
 	.mmio_base		= RENDER_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
 	.init			= init_render_ring,
@@ -1235,7 +1233,7 @@ static const struct intel_ring_buffer render_ring = {
 static const struct intel_ring_buffer bsd_ring = {
 	.name			= "bsd ring",
-	.id			= RING_BSD,
+	.id			= VCS,
 	.mmio_base		= BSD_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
 	.init			= init_ring_common,
@@ -1345,7 +1343,7 @@ gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
 /* ring buffer for Video Codec for Gen6+ */
 static const struct intel_ring_buffer gen6_bsd_ring = {
 	.name			= "gen6 bsd ring",
-	.id			= RING_BSD,
+	.id			= VCS,
 	.mmio_base		= GEN6_BSD_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
 	.init			= init_ring_common,
@@ -1381,79 +1379,13 @@ blt_ring_put_irq(struct intel_ring_buffer *ring)
 			  GEN6_BLITTER_USER_INTERRUPT);
 }

-/* Workaround for some stepping of SNB,
- * each time when BLT engine ring tail moved,
- * the first command in the ring to be parsed
- * should be MI_BATCH_BUFFER_START
- */
-#define NEED_BLT_WORKAROUND(dev) \
-	(IS_GEN6(dev) && (dev->pdev->revision < 8))
-
-static inline struct drm_i915_gem_object *
-to_blt_workaround(struct intel_ring_buffer *ring)
-{
-	return ring->private;
-}
-
-static int blt_ring_init(struct intel_ring_buffer *ring)
-{
-	if (NEED_BLT_WORKAROUND(ring->dev)) {
-		struct drm_i915_gem_object *obj;
-		u32 *ptr;
-		int ret;
-
-		obj = i915_gem_alloc_object(ring->dev, 4096);
-		if (obj == NULL)
-			return -ENOMEM;
-
-		ret = i915_gem_object_pin(obj, 4096, true);
-		if (ret) {
-			drm_gem_object_unreference(&obj->base);
-			return ret;
-		}
-
-		ptr = kmap(obj->pages[0]);
-		*ptr++ = MI_BATCH_BUFFER_END;
-		*ptr++ = MI_NOOP;
-		kunmap(obj->pages[0]);
-
-		ret = i915_gem_object_set_to_gtt_domain(obj, false);
-		if (ret) {
-			i915_gem_object_unpin(obj);
-			drm_gem_object_unreference(&obj->base);
-			return ret;
-		}
-
-		ring->private = obj;
-	}
-
-	return init_ring_common(ring);
-}
-
-static int blt_ring_begin(struct intel_ring_buffer *ring,
-			  int num_dwords)
-{
-	if (ring->private) {
-		int ret = intel_ring_begin(ring, num_dwords+2);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
-		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
-
-		return 0;
-	} else
-		return intel_ring_begin(ring, 4);
-}
-
 static int blt_ring_flush(struct intel_ring_buffer *ring,
 			  u32 invalidate, u32 flush)
 {
 	uint32_t cmd;
 	int ret;

-	ret = blt_ring_begin(ring, 4);
+	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
@@ -1468,22 +1400,12 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
 	return 0;
 }

-static void blt_ring_cleanup(struct intel_ring_buffer *ring)
-{
-	if (!ring->private)
-		return;
-
-	i915_gem_object_unpin(ring->private);
-	drm_gem_object_unreference(ring->private);
-	ring->private = NULL;
-}
-
 static const struct intel_ring_buffer gen6_blt_ring = {
 	.name			= "blt ring",
-	.id			= RING_BLT,
+	.id			= BCS,
 	.mmio_base		= BLT_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
-	.init			= blt_ring_init,
+	.init			= init_ring_common,
 	.write_tail		= ring_write_tail,
 	.flush			= blt_ring_flush,
 	.add_request		= gen6_add_request,
@@ -1491,7 +1413,6 @@ static const struct intel_ring_buffer gen6_blt_ring = {
 	.irq_get		= blt_ring_get_irq,
 	.irq_put		= blt_ring_put_irq,
 	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
-	.cleanup		= blt_ring_cleanup,
 	.sync_to		= gen6_blt_ring_sync_to,
 	.semaphore_register	= {MI_SEMAPHORE_SYNC_BR,
 				   MI_SEMAPHORE_SYNC_BV,
...
@@ -1,13 +1,6 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_

-enum {
-	RCS = 0x0,
-	VCS,
-	BCS,
-	I915_NUM_RINGS,
-};
-
 struct intel_hw_status_page {
 	u32 __iomem *page_addr;
 	unsigned int gfx_addr;
@@ -36,10 +29,11 @@ struct intel_hw_status_page {
 struct intel_ring_buffer {
 	const char *name;
 	enum intel_ring_id {
-		RING_RENDER = 0x1,
-		RING_BSD = 0x2,
-		RING_BLT = 0x4,
+		RCS = 0x0,
+		VCS,
+		BCS,
 	} id;
+#define I915_NUM_RINGS 3
 	u32 mmio_base;
 	void __iomem *virtual_start;
 	struct drm_device *dev;
@@ -119,6 +113,12 @@ struct intel_ring_buffer {
 	void *private;
 };

+static inline unsigned
+intel_ring_flag(struct intel_ring_buffer *ring)
+{
+	return 1 << ring->id;
+}
+
 static inline u32
 intel_ring_sync_index(struct intel_ring_buffer *ring,
 		      struct intel_ring_buffer *other)
...
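Since ring->id changes here from a bitmask (RING_RENDER = 0x1, RING_BSD = 0x2, RING_BLT = 0x4) to a plain array index (RCS = 0, VCS = 1, BCS = 2), code that used the id as a flag, like the execbuffer flush_rings tracking earlier in this merge, must now derive one via intel_ring_flag(). A worked example of the new values (the helper below is hypothetical; it only assumes the dev_priv->ring[] array indexed by RCS/VCS/BCS):

	/* flush_rings is still a bitmask; the flags are now computed
	 * from the index rather than being the index itself.
	 */
	static unsigned example_flush_mask(struct drm_i915_private *dev_priv)
	{
		unsigned flush_rings = 0;

		flush_rings |= intel_ring_flag(&dev_priv->ring[RCS]); /* 1 << 0 = 0x1 */
		flush_rings |= intel_ring_flag(&dev_priv->ring[BCS]); /* 1 << 2 = 0x4 */

		return flush_rings;	/* VCS bit (0x2) not set */
	}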
@@ -501,7 +501,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
 			mutex_lock(&dev->struct_mutex);
 		}
-		i915_gem_object_unpin(old_obj);
+		intel_unpin_fb_obj(old_obj);
 	}

 out_unlock:
@@ -528,7 +528,7 @@ intel_disable_plane(struct drm_plane *plane)
 		goto out;

 	mutex_lock(&dev->struct_mutex);
-	i915_gem_object_unpin(intel_plane->obj);
+	intel_unpin_fb_obj(intel_plane->obj);
 	intel_plane->obj = NULL;
 	mutex_unlock(&dev->struct_mutex);
 out:
...