Commit 784fe39f authored by Dave Airlie

Merge branch 'drm-intel-fixes' of ssh://master.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel

* 'drm-intel-fixes' of ssh://master.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel: (37 commits)
  drm/i915/execbuffer: Reorder binding of objects to favour restrictions
  drm/i915: If we hit OOM when allocating GTT pages, clear the aperture
  drm/i915/evict: Ensure we completely cleanup on failure
  drm/i915/execbuffer: Correctly clear the current object list upon EFAULT
  drm/i915/debugfs: Show all objects in the gtt
  drm/i915: Record AGP memory type upon error
  drm/i915: Periodically flush the active lists and requests
  drm/i915/gtt: Unmap the PCI pages after unbinding them from the GTT
  drm/i915: Record the error batchbuffer on each ring
  drm/i915: Include TLB miss overhead for computing WM
  drm/i915: Propagate error from flushing the ring
  drm/i915: detect & report PCH display error interrupts
  drm/i915: cleanup rc6 code
  drm/i915: fix rc6 enabling around suspend/resume
  drm/i915: re-enable rc6 support for Ironlake+
  drm/i915: Make the ring IMR handling private
  drm/i915/ringbuffer: Simplify the ring irq refcounting
  drm/i915/debugfs: Show the per-ring IMR
  drm/i915: Mask USER interrupts on gen6 (until required)
  drm/i915: Handle ringbuffer stalls when flushing
  ...
parents 4162cf64 6fe4f140
@@ -94,6 +94,8 @@
 #define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
 #define G4x_GMCH_SIZE_VT_2M (0xc << 8)
+#define GFX_FLSH_CNTL 0x2170 /* 915+ */
 #define I810_DRAM_CTL 0x3000
 #define I810_DRAM_ROW_0 0x00000001
 #define I810_DRAM_ROW_0_SDRAM 0x00000001
...
@@ -814,6 +814,12 @@ static bool intel_enable_gtt(void)
 }
 }
+/* On the resume path we may be adjusting the PGTBL value, so
+ * be paranoid and flush all chipset write buffers...
+ */
+if (INTEL_GTT_GEN >= 3)
+writel(0, intel_private.registers+GFX_FLSH_CNTL);
 reg = intel_private.registers+I810_PGETBL_CTL;
 writel(intel_private.PGETBL_save, reg);
 if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
@@ -823,6 +829,9 @@ static bool intel_enable_gtt(void)
 return false;
 }
+if (INTEL_GTT_GEN >= 3)
+writel(0, intel_private.registers+GFX_FLSH_CNTL);
 return true;
 }
...
@@ -106,10 +106,19 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 }
 }
+static const char *agp_type_str(int type)
+{
+switch (type) {
+case 0: return " uncached";
+case 1: return " snooped";
+default: return "";
+}
+}
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s",
+seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
 &obj->base,
 get_pin_flag(obj),
 get_tiling_flag(obj),
@@ -118,6 +127,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 obj->base.write_domain,
 obj->last_rendering_seqno,
 obj->last_fenced_seqno,
+agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
 obj->dirty ? " dirty" : "",
 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 if (obj->base.name)
@@ -276,6 +286,37 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 return 0;
 }
+static int i915_gem_gtt_info(struct seq_file *m, void* data)
+{
+struct drm_info_node *node = (struct drm_info_node *) m->private;
+struct drm_device *dev = node->minor->dev;
+struct drm_i915_private *dev_priv = dev->dev_private;
+struct drm_i915_gem_object *obj;
+size_t total_obj_size, total_gtt_size;
+int count, ret;
+ret = mutex_lock_interruptible(&dev->struct_mutex);
+if (ret)
+return ret;
+total_obj_size = total_gtt_size = count = 0;
+list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+seq_printf(m, " ");
+describe_obj(m, obj);
+seq_printf(m, "\n");
+total_obj_size += obj->base.size;
+total_gtt_size += obj->gtt_space->size;
+count++;
+}
+mutex_unlock(&dev->struct_mutex);
+seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+count, total_obj_size, total_gtt_size);
+return 0;
+}
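[Annotation: once registered below in i915_debugfs_list, this node walks dev_priv->mm.gtt_list under struct_mutex and prints one describe_obj() line per bound object. As a usage sketch (the dri minor number is an assumption; it depends on the machine), the file can simply be read:

    cat /sys/kernel/debug/dri/0/i915_gem_gtt]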
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
@@ -456,8 +497,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 }
 seq_printf(m, "Interrupts received: %d\n",
 atomic_read(&dev_priv->irq_received));
-for (i = 0; i < I915_NUM_RINGS; i++)
+for (i = 0; i < I915_NUM_RINGS; i++) {
+if (IS_GEN6(dev)) {
+seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
+dev_priv->ring[i].name,
+I915_READ_IMR(&dev_priv->ring[i]));
+}
 i915_ring_seqno_info(m, &dev_priv->ring[i]);
+}
 mutex_unlock(&dev->struct_mutex);
 return 0;
@@ -656,7 +703,7 @@ static void print_error_buffers(struct seq_file *m,
 seq_printf(m, "%s [%d]:\n", name, count);
 while (count--) {
-seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s",
+seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s%s",
 err->gtt_offset,
 err->size,
 err->read_domains,
@@ -666,7 +713,8 @@ static void print_error_buffers(struct seq_file *m,
 tiling_flag(err->tiling),
 dirty_flag(err->dirty),
 purgeable_flag(err->purgeable),
-ring_str(err->ring));
+ring_str(err->ring),
+agp_type_str(err->agp_type));
 if (err->name)
 seq_printf(m, " (name: %d)", err->name);
@@ -744,7 +792,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
 if (error->batchbuffer[i]) {
 struct drm_i915_error_object *obj = error->batchbuffer[i];
-seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+dev_priv->ring[i].name,
+obj->gtt_offset);
 offset = 0;
 for (page = 0; page < obj->page_count; page++) {
 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
@@ -890,7 +940,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 struct drm_device *dev = node->minor->dev;
 drm_i915_private_t *dev_priv = dev->dev_private;
 u32 rgvmodectl = I915_READ(MEMMODECTL);
-u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+u32 rstdbyctl = I915_READ(RSTDBYCTL);
 u16 crstandvid = I915_READ16(CRSTANDVID);
 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -913,6 +963,30 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
 seq_printf(m, "Render standby enabled: %s\n",
 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+seq_printf(m, "Current RS state: ");
+switch (rstdbyctl & RSX_STATUS_MASK) {
+case RSX_STATUS_ON:
+seq_printf(m, "on\n");
+break;
+case RSX_STATUS_RC1:
+seq_printf(m, "RC1\n");
+break;
+case RSX_STATUS_RC1E:
+seq_printf(m, "RC1E\n");
+break;
+case RSX_STATUS_RS1:
+seq_printf(m, "RS1\n");
+break;
+case RSX_STATUS_RS2:
+seq_printf(m, "RS2 (RC6)\n");
+break;
+case RSX_STATUS_RS3:
+seq_printf(m, "RC3 (RC6+)\n");
+break;
+default:
+seq_printf(m, "unknown\n");
+break;
+}
 return 0;
 }
@@ -1187,6 +1261,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 static struct drm_info_list i915_debugfs_list[] = {
 {"i915_capabilities", i915_capabilities, 0, 0},
 {"i915_gem_objects", i915_gem_object_info, 0},
+{"i915_gem_gtt", i915_gem_gtt_info, 0},
 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
...
@@ -1962,13 +1962,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 /* enable GEM by default */
 dev_priv->has_gem = 1;
-if (dev_priv->has_gem == 0 &&
-drm_core_check_feature(dev, DRIVER_MODESET)) {
-DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
-ret = -ENODEV;
-goto out_workqueue_free;
-}
 dev->driver->get_vblank_counter = i915_get_vblank_counter;
 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
@@ -2055,7 +2048,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 intel_teardown_gmbus(dev);
 intel_teardown_mchbar(dev);
-out_workqueue_free:
 destroy_workqueue(dev_priv->wq);
 out_iomapfree:
 io_mapping_free(dev_priv->mm.gtt_mapping);
...
@@ -49,6 +49,9 @@ module_param_named(powersave, i915_powersave, int, 0600);
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+bool i915_try_reset = true;
+module_param_named(reset, i915_try_reset, bool, 0600);
 static struct drm_driver driver;
 extern int intel_agp_enabled;
@@ -352,6 +355,9 @@ static int i915_drm_thaw(struct drm_device *dev)
 /* Resume the modeset for every activated CRTC */
 drm_helper_resume_force_mode(dev);
+if (dev_priv->renderctx && dev_priv->pwrctx)
+ironlake_enable_rc6(dev);
 }
 intel_opregion_init(dev);
@@ -475,6 +481,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
 bool need_display = true;
 int ret;
+if (!i915_try_reset)
+return 0;
 if (!mutex_trylock(&dev->struct_mutex))
 return -EBUSY;
...
@@ -172,20 +172,21 @@ struct drm_i915_error_state {
 int page_count;
 u32 gtt_offset;
 u32 *pages[0];
-} *ringbuffer, *batchbuffer[2];
+} *ringbuffer, *batchbuffer[I915_NUM_RINGS];
 struct drm_i915_error_buffer {
-size_t size;
+u32 size;
 u32 name;
 u32 seqno;
 u32 gtt_offset;
 u32 read_domains;
 u32 write_domain;
-u32 fence_reg;
+s32 fence_reg:5;
 s32 pinned:2;
 u32 tiling:2;
 u32 dirty:1;
 u32 purgeable:1;
 u32 ring:4;
+u32 agp_type:1;
 } *active_bo, *pinned_bo;
 u32 active_bo_count, pinned_bo_count;
 struct intel_overlay_error_state *overlay;
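[Annotation: a sketch of why the shrunken error-buffer fields above still fit their values (assumed rationale, not stated in the patch):

    /* s32 fence_reg:5 holds -16..15: 16 fence registers plus a -1
     * "no fence" sentinel; u32 agp_type:1 records the 0 = uncached,
     * 1 = snooped distinction printed by agp_type_str() in debugfs.
     * Bitfields keep the captured active_bo/pinned_bo arrays compact. */]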
@@ -332,6 +333,7 @@ typedef struct drm_i915_private {
 /* LVDS info */
 int backlight_level; /* restore backlight to this value */
+bool backlight_enabled;
 struct drm_display_mode *panel_fixed_mode;
 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -794,6 +796,7 @@ struct drm_i915_gem_object {
 */
 struct hlist_node exec_node;
 unsigned long exec_handle;
+struct drm_i915_gem_exec_object2 *exec_entry;
 /**
 * Current offset of the object in GTT space.
@@ -1006,12 +1009,6 @@ extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
 extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
 struct drm_file *file_priv);
-extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
-u32 mask);
-extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
-u32 mask);
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1091,10 +1088,10 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_flush_ring(struct drm_device *dev,
+int __must_check i915_gem_flush_ring(struct drm_device *dev,
 struct intel_ring_buffer *ring,
 uint32_t invalidate_domains,
 uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1265,6 +1262,7 @@ extern void intel_disable_fbc(struct drm_device *dev);
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void ironlake_enable_rc6(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
...
@@ -35,18 +35,18 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
-static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
 bool write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 uint64_t offset,
 uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 unsigned alignment,
 bool map_and_fenceable);
 static void i915_gem_clear_fence_reg(struct drm_device *dev,
 struct drm_i915_fence_reg *reg);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
@@ -1935,6 +1935,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 {
 drm_i915_private_t *dev_priv;
 struct drm_device *dev;
+bool idle;
+int i;
 dev_priv = container_of(work, drm_i915_private_t,
 mm.retire_work.work);
@@ -1948,11 +1950,31 @@ i915_gem_retire_work_handler(struct work_struct *work)
 i915_gem_retire_requests(dev);
-if (!dev_priv->mm.suspended &&
-(!list_empty(&dev_priv->ring[RCS].request_list) ||
-!list_empty(&dev_priv->ring[VCS].request_list) ||
-!list_empty(&dev_priv->ring[BCS].request_list)))
+/* Send a periodic flush down the ring so we don't hold onto GEM
+ * objects indefinitely.
+ */
+idle = true;
+for (i = 0; i < I915_NUM_RINGS; i++) {
+struct intel_ring_buffer *ring = &dev_priv->ring[i];
+if (!list_empty(&ring->gpu_write_list)) {
+struct drm_i915_gem_request *request;
+int ret;
+ret = i915_gem_flush_ring(dev, ring, 0,
+I915_GEM_GPU_DOMAINS);
+request = kzalloc(sizeof(*request), GFP_KERNEL);
+if (ret || request == NULL ||
+i915_add_request(dev, NULL, request, ring))
+kfree(request);
+}
+idle &= list_empty(&ring->request_list);
+}
+if (!dev_priv->mm.suspended && !idle)
 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 mutex_unlock(&dev->struct_mutex);
 }
@@ -2142,25 +2164,37 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 return ret;
 }
-void
+int
 i915_gem_flush_ring(struct drm_device *dev,
 struct intel_ring_buffer *ring,
 uint32_t invalidate_domains,
 uint32_t flush_domains)
 {
-ring->flush(ring, invalidate_domains, flush_domains);
+int ret;
+ret = ring->flush(ring, invalidate_domains, flush_domains);
+if (ret)
+return ret;
 i915_gem_process_flushing_list(dev, flush_domains, ring);
+return 0;
 }
 static int i915_ring_idle(struct drm_device *dev,
 struct intel_ring_buffer *ring)
 {
+int ret;
 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 return 0;
-if (!list_empty(&ring->gpu_write_list))
-i915_gem_flush_ring(dev, ring,
+if (!list_empty(&ring->gpu_write_list)) {
+ret = i915_gem_flush_ring(dev, ring,
 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+if (ret)
+return ret;
+}
 return i915_wait_request(dev,
 i915_gem_next_request_seqno(dev, ring),
 ring);
@@ -2370,10 +2404,13 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 int ret;
 if (obj->fenced_gpu_access) {
-if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-i915_gem_flush_ring(obj->base.dev,
+if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ret = i915_gem_flush_ring(obj->base.dev,
 obj->last_fenced_ring,
 0, obj->base.write_domain);
+if (ret)
+return ret;
+}
 obj->fenced_gpu_access = false;
 }
@@ -2393,6 +2430,12 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 obj->last_fenced_ring = NULL;
 }
+/* Ensure that all CPU reads are completed before installing a fence
+ * and all writes before removing the fence.
+ */
+if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
+mb();
 return 0;
 }
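[Annotation: a minimal sketch of the ordering the new mb() enforces (illustrative pseudo-driver code, not from this patch; fence_reg_addr is hypothetical):

    writel(data, gtt_mapping);      /* CPU access through the GTT */
    mb();                           /* complete it before touching... */
    writel(val, fence_reg_addr);    /* ...the fence register */]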
@@ -2523,9 +2566,12 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 return ret;
 } else if (obj->tiling_changed) {
 if (obj->fenced_gpu_access) {
-if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-i915_gem_flush_ring(obj->base.dev, obj->ring,
+if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
 0, obj->base.write_domain);
+if (ret)
+return ret;
+}
 obj->fenced_gpu_access = false;
 }
@@ -2736,10 +2782,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 obj->gtt_space = NULL;
 if (ret == -ENOMEM) {
-/* first try to clear up some space from the GTT */
-ret = i915_gem_evict_something(dev, size,
-alignment,
-map_and_fenceable);
+/* first try to reclaim some memory by clearing the GTT */
+ret = i915_gem_evict_everything(dev, false);
 if (ret) {
 /* now try to shrink everyone else */
 if (gfpmask) {
@@ -2747,7 +2791,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 goto search_free;
 }
-return ret;
+return -ENOMEM;
 }
 goto search_free;
@@ -2762,9 +2806,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 drm_mm_put_block(obj->gtt_space);
 obj->gtt_space = NULL;
-ret = i915_gem_evict_something(dev, size,
-alignment, map_and_fenceable);
-if (ret)
+if (i915_gem_evict_everything(dev, false))
 return ret;
 goto search_free;
@@ -2811,17 +2853,16 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 }
 /** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
 struct drm_device *dev = obj->base.dev;
 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-return;
+return 0;
 /* Queue the GPU write cache flushing we need. */
-i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
-BUG_ON(obj->base.write_domain);
+return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
 }
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2833,10 +2874,16 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
 return;
 /* No actual flushing is required for the GTT write domain. Writes
 * to it immediately go to main memory as far as we know, so there's
 * no chipset flush. It also doesn't land in render cache.
+*
+* However, we do have to enforce the order so that all writes through
+* the GTT land before any writes to the device, such as updates to
+* the GATT itself.
 */
+wmb();
 i915_gem_release_mmap(obj);
 old_write_domain = obj->base.write_domain;
@@ -2882,7 +2929,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 if (obj->gtt_space == NULL)
 return -EINVAL;
-i915_gem_object_flush_gpu_write_domain(obj);
+ret = i915_gem_object_flush_gpu_write_domain(obj);
+if (ret)
+return ret;
 if (obj->pending_gpu_write || write) {
 ret = i915_gem_object_wait_rendering(obj, true);
 if (ret)
@@ -2927,7 +2977,10 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 if (obj->gtt_space == NULL)
 return -EINVAL;
-i915_gem_object_flush_gpu_write_domain(obj);
+ret = i915_gem_object_flush_gpu_write_domain(obj);
+if (ret)
+return ret;
 /* Currently, we are always called from an non-interruptible context. */
 if (pipelined != obj->ring) {
@@ -2952,12 +3005,17 @@ int
 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 bool interruptible)
 {
+int ret;
 if (!obj->active)
 return 0;
-if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-i915_gem_flush_ring(obj->base.dev, obj->ring,
+if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
 0, obj->base.write_domain);
+if (ret)
+return ret;
+}
 return i915_gem_object_wait_rendering(obj, interruptible);
 }
@@ -2974,7 +3032,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 uint32_t old_write_domain, old_read_domains;
 int ret;
-i915_gem_object_flush_gpu_write_domain(obj);
+ret = i915_gem_object_flush_gpu_write_domain(obj);
+if (ret)
+return ret;
 ret = i915_gem_object_wait_rendering(obj, true);
 if (ret)
 return ret;
@@ -3069,7 +3130,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 if (offset == 0 && size == obj->base.size)
 return i915_gem_object_set_to_cpu_domain(obj, 0);
-i915_gem_object_flush_gpu_write_domain(obj);
+ret = i915_gem_object_flush_gpu_write_domain(obj);
+if (ret)
+return ret;
 ret = i915_gem_object_wait_rendering(obj, true);
 if (ret)
 return ret;
@@ -3362,8 +3426,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 * flush earlier is beneficial.
 */
 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-i915_gem_flush_ring(dev, obj->ring,
+ret = i915_gem_flush_ring(dev, obj->ring,
 0, obj->base.write_domain);
 } else if (obj->ring->outstanding_lazy_request ==
 obj->last_rendering_seqno) {
 struct drm_i915_gem_request *request;
...
@@ -127,9 +127,15 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 }
 /* Nothing found, clean up and bail out! */
-list_for_each_entry(obj, &unwind_list, exec_list) {
+while (!list_empty(&unwind_list)) {
+obj = list_first_entry(&unwind_list,
+struct drm_i915_gem_object,
+exec_list);
 ret = drm_mm_scan_remove_block(obj->gtt_space);
 BUG_ON(ret);
+list_del_init(&obj->exec_list);
 drm_gem_object_unreference(&obj->base);
 }
@@ -162,6 +168,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 exec_list);
 if (ret == 0)
 ret = i915_gem_object_unbind(obj);
+list_del_init(&obj->exec_list);
 drm_gem_object_unreference(&obj->base);
 }
...
@@ -268,7 +268,6 @@ eb_destroy(struct eb_objects *eb)
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 struct eb_objects *eb,
-struct drm_i915_gem_exec_object2 *entry,
 struct drm_i915_gem_relocation_entry *reloc)
 {
 struct drm_device *dev = obj->base.dev;
@@ -411,10 +410,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 static int
 i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-struct eb_objects *eb,
-struct drm_i915_gem_exec_object2 *entry)
+struct eb_objects *eb)
 {
 struct drm_i915_gem_relocation_entry __user *user_relocs;
+struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 int i, ret;
 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
@@ -426,7 +425,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 sizeof(reloc)))
 return -EFAULT;
-ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc);
+ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
 if (ret)
 return ret;
@@ -442,13 +441,13 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 static int
 i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 struct eb_objects *eb,
-struct drm_i915_gem_exec_object2 *entry,
 struct drm_i915_gem_relocation_entry *relocs)
 {
+const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 int i, ret;
 for (i = 0; i < entry->relocation_count; i++) {
-ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]);
+ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
 if (ret)
 return ret;
 }
@@ -459,8 +458,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 static int
 i915_gem_execbuffer_relocate(struct drm_device *dev,
 struct eb_objects *eb,
-struct list_head *objects,
-struct drm_i915_gem_exec_object2 *exec)
+struct list_head *objects)
 {
 struct drm_i915_gem_object *obj;
 int ret;
@@ -468,7 +466,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 list_for_each_entry(obj, objects, exec_list) {
 obj->base.pending_read_domains = 0;
 obj->base.pending_write_domain = 0;
-ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++);
+ret = i915_gem_execbuffer_relocate_object(obj, eb);
 if (ret)
 return ret;
 }
@@ -479,13 +477,36 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 struct drm_file *file,
-struct list_head *objects,
-struct drm_i915_gem_exec_object2 *exec)
+struct list_head *objects)
 {
 struct drm_i915_gem_object *obj;
-struct drm_i915_gem_exec_object2 *entry;
 int ret, retry;
 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+struct list_head ordered_objects;
+INIT_LIST_HEAD(&ordered_objects);
+while (!list_empty(objects)) {
+struct drm_i915_gem_exec_object2 *entry;
+bool need_fence, need_mappable;
+obj = list_first_entry(objects,
+struct drm_i915_gem_object,
+exec_list);
+entry = obj->exec_entry;
+need_fence =
+has_fenced_gpu_access &&
+entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+obj->tiling_mode != I915_TILING_NONE;
+need_mappable =
+entry->relocation_count ? true : need_fence;
+if (need_mappable)
+list_move(&obj->exec_list, &ordered_objects);
+else
+list_move_tail(&obj->exec_list, &ordered_objects);
+}
+list_splice(&ordered_objects, objects);
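[Annotation: the reordering loop above is the heart of the "reorder binding of objects to favour restrictions" change: objects that must live in the low, CPU-mappable aperture are bound first, before unconstrained objects can fragment it. Restated as a comment (assumed rationale):

    /* need_mappable  -> list_move()      -> head of ordered_objects
     * otherwise      -> list_move_tail() -> tail of ordered_objects
     * so constrained objects get first pick of the mappable range. */]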
 /* Attempt to pin all of the buffers into the GTT.
 * This is done in 3 phases:
@@ -504,14 +525,11 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 ret = 0;
 /* Unbind any ill-fitting objects or pin. */
-entry = exec;
 list_for_each_entry(obj, objects, exec_list) {
+struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 bool need_fence, need_mappable;
-if (!obj->gtt_space) {
-entry++;
+if (!obj->gtt_space)
 continue;
-}
 need_fence =
 has_fenced_gpu_access &&
@@ -534,8 +552,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 }
 /* Bind fresh objects */
-entry = exec;
 list_for_each_entry(obj, objects, exec_list) {
+struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 bool need_fence;
 need_fence =
@@ -570,7 +588,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 }
 entry->offset = obj->gtt_offset;
-entry++;
 }
 /* Decrement pin count for bound objects */
@@ -622,7 +639,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 int i, total, ret;
 /* We may process another execbuffer during the unlock... */
-while (list_empty(objects)) {
+while (!list_empty(objects)) {
 obj = list_first_entry(objects,
 struct drm_i915_gem_object,
 exec_list);
@@ -665,7 +682,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 }
 /* reacquire the objects */
-INIT_LIST_HEAD(objects);
 eb_reset(eb);
 for (i = 0; i < count; i++) {
 struct drm_i915_gem_object *obj;
@@ -681,10 +697,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 list_add_tail(&obj->exec_list, objects);
 obj->exec_handle = exec[i].handle;
+obj->exec_entry = &exec[i];
 eb_add_object(eb, obj);
 }
-ret = i915_gem_execbuffer_reserve(ring, file, objects, exec);
+ret = i915_gem_execbuffer_reserve(ring, file, objects);
 if (ret)
 goto err;
@@ -693,7 +710,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 obj->base.pending_read_domains = 0;
 obj->base.pending_write_domain = 0;
 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-exec,
 reloc + total);
 if (ret)
 goto err;
@@ -713,25 +729,34 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 return ret;
 }
-static void
+static int
 i915_gem_execbuffer_flush(struct drm_device *dev,
 uint32_t invalidate_domains,
 uint32_t flush_domains,
 uint32_t flush_rings)
 {
 drm_i915_private_t *dev_priv = dev->dev_private;
-int i;
+int i, ret;
 if (flush_domains & I915_GEM_DOMAIN_CPU)
 intel_gtt_chipset_flush();
+if (flush_domains & I915_GEM_DOMAIN_GTT)
+wmb();
 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
 for (i = 0; i < I915_NUM_RINGS; i++)
-if (flush_rings & (1 << i))
-i915_gem_flush_ring(dev, &dev_priv->ring[i],
-invalidate_domains,
-flush_domains);
+if (flush_rings & (1 << i)) {
+ret = i915_gem_flush_ring(dev,
+&dev_priv->ring[i],
+invalidate_domains,
+flush_domains);
+if (ret)
+return ret;
+}
 }
+return 0;
 }
 static int
@@ -795,10 +820,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 cd.invalidate_domains,
 cd.flush_domains);
 #endif
-i915_gem_execbuffer_flush(ring->dev,
+ret = i915_gem_execbuffer_flush(ring->dev,
 cd.invalidate_domains,
 cd.flush_domains,
 cd.flush_rings);
+if (ret)
+return ret;
 }
 list_for_each_entry(obj, objects, exec_list) {
@@ -921,7 +948,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 struct intel_ring_buffer *ring)
 {
 struct drm_i915_gem_request *request;
-u32 flush_domains;
+u32 invalidate;
 /*
 * Ensure that the commands in the batch buffer are
@@ -929,11 +956,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 *
 * The sampler always gets flushed on i965 (sigh).
 */
-flush_domains = 0;
+invalidate = I915_GEM_DOMAIN_COMMAND;
 if (INTEL_INFO(dev)->gen >= 4)
-flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
+invalidate |= I915_GEM_DOMAIN_SAMPLER;
+if (ring->flush(ring, invalidate, 0)) {
+i915_gem_next_request_seqno(dev, ring);
+return;
+}
 /* Add a breadcrumb for the completion of the batch buffer */
 request = kzalloc(sizeof(*request), GFP_KERNEL);
@@ -1098,16 +1127,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 list_add_tail(&obj->exec_list, &objects);
 obj->exec_handle = exec[i].handle;
+obj->exec_entry = &exec[i];
 eb_add_object(eb, obj);
 }
+/* take note of the batch buffer before we might reorder the lists */
+batch_obj = list_entry(objects.prev,
+struct drm_i915_gem_object,
+exec_list);
 /* Move the objects en-masse into the GTT, evicting if necessary. */
-ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec);
+ret = i915_gem_execbuffer_reserve(ring, file, &objects);
 if (ret)
 goto err;
 /* The objects are in their final locations, apply the relocations. */
-ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec);
+ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
 if (ret) {
 if (ret == -EFAULT) {
 ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
@@ -1121,9 +1156,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 }
 /* Set the pending read domains for the batch buffer to COMMAND */
-batch_obj = list_entry(objects.prev,
-struct drm_i915_gem_object,
-exec_list);
 if (batch_obj->base.pending_write_domain) {
 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
 ret = -EINVAL;
@@ -1340,4 +1372,3 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 drm_free_large(exec2_list);
 return ret;
 }
@@ -85,15 +85,11 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
-struct drm_device *dev = obj->base.dev;
-struct drm_i915_private *dev_priv = dev->dev_private;
+intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+obj->base.size >> PAGE_SHIFT);
-if (dev_priv->mm.gtt->needs_dmar) {
+if (obj->sg_list) {
 intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
 obj->sg_list = NULL;
-obj->num_sg = 0;
 }
-intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
-obj->base.size >> PAGE_SHIFT);
 }
...
@@ -145,6 +145,8 @@
 #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
 #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
 #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
+#define MI_SUSPEND_FLUSH_EN (1<<0)
 #define MI_REPORT_HEAD MI_INSTR(0x07, 0)
 #define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
 #define MI_OVERLAY_CONTINUE (0x0<<21)
@@ -159,6 +161,7 @@
 #define MI_MM_SPACE_PHYSICAL (0<<8)
 #define MI_SAVE_EXT_STATE_EN (1<<3)
 #define MI_RESTORE_EXT_STATE_EN (1<<2)
+#define MI_FORCE_RESTORE (1<<1)
 #define MI_RESTORE_INHIBIT (1<<0)
 #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
 #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
@@ -288,6 +291,7 @@
 #define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
 #define RING_ACTHD(base) ((base)+0x74)
 #define RING_NOPID(base) ((base)+0x94)
+#define RING_IMR(base) ((base)+0xa8)
 #define TAIL_ADDR 0x001FFFF8
 #define HEAD_WRAP_COUNT 0xFFE00000
 #define HEAD_WRAP_ONE 0x00200000
@@ -1130,9 +1134,50 @@
 #define RCBMINAVG 0x111a0
 #define RCUPEI 0x111b0
 #define RCDNEI 0x111b4
-#define MCHBAR_RENDER_STANDBY 0x111b8
-#define RCX_SW_EXIT (1<<23)
-#define RSX_STATUS_MASK 0x00700000
+#define RSTDBYCTL 0x111b8
+#define RS1EN (1<<31)
+#define RS2EN (1<<30)
+#define RS3EN (1<<29)
+#define D3RS3EN (1<<28) /* Display D3 imlies RS3 */
+#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */
+#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */
+#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */
+#define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */
+#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */
+#define RSX_STATUS_MASK (7<<20)
+#define RSX_STATUS_ON (0<<20)
+#define RSX_STATUS_RC1 (1<<20)
+#define RSX_STATUS_RC1E (2<<20)
+#define RSX_STATUS_RS1 (3<<20)
+#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */
+#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */
+#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */
+#define RSX_STATUS_RSVD2 (7<<20)
+#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */
+#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */
+#define JRSC (1<<17) /* rsx coupled to cpu c-state */
+#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */
+#define RS1CONTSAV_MASK (3<<14)
+#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */
+#define RS1CONTSAV_RSVD (1<<14)
+#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */
+#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */
+#define NORMSLEXLAT_MASK (3<<12)
+#define SLOW_RS123 (0<<12)
+#define SLOW_RS23 (1<<12)
+#define SLOW_RS3 (2<<12)
+#define NORMAL_RS123 (3<<12)
+#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */
+#define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */
+#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */
+#define RS_CSTATE_MASK (3<<4)
+#define RS_CSTATE_C367_RS1 (0<<4)
+#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
+#define RS_CSTATE_RSVD (2<<4)
+#define RS_CSTATE_C367_RS2 (3<<4)
+#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
+#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
 #define VIDCTL 0x111c0
 #define VIDSTS 0x111c8
 #define VIDSTART 0x111cc /* 8 bits */
@@ -2345,8 +2390,13 @@
 /* Memory latency timer register */
 #define MLTR_ILK 0x11222
+#define MLTR_WM1_SHIFT 0
+#define MLTR_WM2_SHIFT 8
 /* the unit of memory self-refresh latency time is 0.5us */
 #define ILK_SRLT_MASK 0x3f
+#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
+#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
+#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
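[Annotation: for reference, the new helper expands as follows (>> binds tighter than &, so the shift happens before the mask):

    /* ILK_READ_WM2_LATENCY()
     *   == (I915_READ(MLTR_ILK) >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK
     *   == bits 13:8 of MLTR_ILK, in units of 0.5us */]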
 /* define the fifo size on Ironlake */
 #define ILK_DISPLAY_FIFO 128
@@ -2728,12 +2778,41 @@
 /* PCH */
 /* south display engine interrupt */
+#define SDE_AUDIO_POWER_D (1 << 27)
+#define SDE_AUDIO_POWER_C (1 << 26)
+#define SDE_AUDIO_POWER_B (1 << 25)
+#define SDE_AUDIO_POWER_SHIFT (25)
+#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS (1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
+#define SDE_AUDIO_HDCP_MASK (3 << 22)
+#define SDE_AUDIO_TRANSB (1 << 21)
+#define SDE_AUDIO_TRANSA (1 << 20)
+#define SDE_AUDIO_TRANS_MASK (3 << 20)
+#define SDE_POISON (1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB (1 << 17)
+#define SDE_FDI_RXA (1 << 16)
+#define SDE_FDI_MASK (3 << 16)
+#define SDE_AUXD (1 << 15)
+#define SDE_AUXC (1 << 14)
+#define SDE_AUXB (1 << 13)
+#define SDE_AUX_MASK (7 << 13)
+/* 12 reserved */
 #define SDE_CRT_HOTPLUG (1 << 11)
 #define SDE_PORTD_HOTPLUG (1 << 10)
 #define SDE_PORTC_HOTPLUG (1 << 9)
 #define SDE_PORTB_HOTPLUG (1 << 8)
 #define SDE_SDVOB_HOTPLUG (1 << 6)
 #define SDE_HOTPLUG_MASK (0xf << 8)
+#define SDE_TRANSB_CRC_DONE (1 << 5)
+#define SDE_TRANSB_CRC_ERR (1 << 4)
+#define SDE_TRANSB_FIFO_UNDER (1 << 3)
+#define SDE_TRANSA_CRC_DONE (1 << 2)
+#define SDE_TRANSA_CRC_ERR (1 << 1)
+#define SDE_TRANSA_FIFO_UNDER (1 << 0)
+#define SDE_TRANS_MASK (0x3f)
 /* CPT */
 #define SDE_CRT_HOTPLUG_CPT (1 << 19)
 #define SDE_PORTD_HOTPLUG_CPT (1 << 23)
@@ -3174,10 +3253,11 @@
 #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
 #define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
 /* SNB B-stepping */
-#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
-#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
-#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
 #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
 #define FORCEWAKE 0xA18C
@@ -3239,6 +3319,7 @@
 #define GEN6_PCODE_MAILBOX 0x138124
 #define GEN6_PCODE_READY (1<<31)
+#define GEN6_READ_OC_PARAMS 0xc
 #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
 #define GEN6_PCODE_DATA 0x138128
...
@@ -740,7 +740,7 @@ void i915_restore_display(struct drm_device *dev)
 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
-I915_WRITE(MCHBAR_RENDER_STANDBY,
+I915_WRITE(RSTDBYCTL,
 dev_priv->saveMCHBAR_RENDER_STANDBY);
 } else {
 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
@@ -811,7 +811,7 @@ int i915_save_state(struct drm_device *dev)
 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
 dev_priv->saveMCHBAR_RENDER_STANDBY =
-I915_READ(MCHBAR_RENDER_STANDBY);
+I915_READ(RSTDBYCTL);
 } else {
 dev_priv->saveIER = I915_READ(IER);
 dev_priv->saveIMR = I915_READ(IMR);
@@ -822,10 +822,6 @@ int i915_save_state(struct drm_device *dev)
 if (IS_GEN6(dev))
 gen6_disable_rps(dev);
-/* XXX disabling the clock gating breaks suspend on gm45
-intel_disable_clock_gating(dev);
-*/
 /* Cache mode state */
 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
...
@@ -30,6 +30,7 @@
 #include "drm.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_edid.h"
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"

@@ -287,8 +288,9 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
 	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
 }

-static bool intel_crt_detect_ddc(struct intel_crt *crt)
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
+	struct intel_crt *crt = intel_attached_crt(connector);
 	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;

 	/* CRT should always be at 0, but check anyway */

@@ -301,8 +303,26 @@ static bool intel_crt_detect_ddc(struct intel_crt *crt)
 	}

 	if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
-		DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-		return true;
+		struct edid *edid;
+		bool is_digital = false;
+
+		edid = drm_get_edid(connector,
+			&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+		/*
+		 * This may be a DVI-I connector with a shared DDC
+		 * link between analog and digital outputs, so we
+		 * have to check the EDID input spec of the attached device.
+		 */
+		if (edid != NULL) {
+			is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+			connector->display_info.raw_edid = NULL;
+			kfree(edid);
+		}
+
+		if (!is_digital) {
+			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+			return true;
+		}
 	}

 	return false;

@@ -458,7 +478,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 		}
 	}

-	if (intel_crt_detect_ddc(crt))
+	if (intel_crt_detect_ddc(connector))
 		return connector_status_connected;

 	if (!force)

@@ -472,7 +492,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 	crtc = intel_get_load_detect_pipe(&crt->base, connector,
 					  NULL, &dpms_mode);
 	if (crtc) {
-		if (intel_crt_detect_ddc(crt))
+		if (intel_crt_detect_ddc(connector))
 			status = connector_status_connected;
 		else
 			status = intel_crt_load_detect(crtc, crt);
...
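The rewritten intel_crt_detect_ddc() no longer trusts a bare DDC answer at address 0x50: on a DVI-I connector the analog and digital halves can share one DDC link, so the EDID "video input definition" byte is consulted and the CRT path only claims the monitor when that byte does not flag a digital sink. A standalone sketch of the bit test, assuming a raw 128-byte EDID block; the helper name is illustrative, and the bit value matches DRM_EDID_INPUT_DIGITAL in drm_edid.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Matches DRM_EDID_INPUT_DIGITAL (1 << 7) from drm_edid.h: bit 7 of the
 * "video input definition" byte, which is byte 20 of an EDID block. */
#define EDID_INPUT_DIGITAL	(1u << 7)

/* Illustrative: should the CRT connector claim this EDID? Only if the
 * sink declares an analog input. */
static bool edid_is_analog(const uint8_t edid[128])
{
	return (edid[20] & EDID_INPUT_DIGITAL) == 0;
}

int main(void)
{
	uint8_t edid[128] = {0};

	edid[20] = 0x80; /* digital sink answering on the shared DDC line */
	printf("claim as VGA: %s\n", edid_is_analog(edid) ? "yes" : "no");
	return 0;
}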
@@ -1153,18 +1153,27 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
 static uint32_t
 intel_gen6_edp_signal_levels(uint8_t train_set)
 {
-	switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+
+	switch (signal_levels) {
 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
-		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
-		return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
 	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
-		return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
 	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
-		return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
 	default:
-		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
-		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
 	}
 }

@@ -1334,17 +1343,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dp->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool channel_eq = false;
-	int tries;
+	int tries, cr_tries;
 	u32 reg;
 	uint32_t DP = intel_dp->DP;

 	/* channel equalization */
 	tries = 0;
+	cr_tries = 0;
 	channel_eq = false;
 	for (;;) {
 		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
 		uint32_t signal_levels;

+		if (cr_tries > 5) {
+			DRM_ERROR("failed to train DP, aborting\n");
+			intel_dp_link_down(intel_dp);
+			break;
+		}
+
 		if (IS_GEN6(dev) && is_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;

@@ -1367,14 +1383,26 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 		if (!intel_dp_get_link_status(intel_dp))
 			break;

+		/* Make sure clock is still ok */
+		if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			intel_dp_start_link_train(intel_dp);
+			cr_tries++;
+			continue;
+		}
+
 		if (intel_channel_eq_ok(intel_dp)) {
 			channel_eq = true;
 			break;
 		}

-		/* Try 5 times */
-		if (tries > 5)
-			break;
+		/* Try 5 times, then try clock recovery if that fails */
+		if (tries > 5) {
+			intel_dp_link_down(intel_dp);
+			intel_dp_start_link_train(intel_dp);
+			tries = 0;
+			cr_tries++;
+			continue;
+		}

 		/* Compute new intel_dp->train_set as requested by target */
 		intel_get_adjust_train(intel_dp);
...
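The reworked channel-equalization loop is effectively a two-level retry state machine: up to five EQ adjustments per clock-recovery pass, a full retrain (intel_dp_link_down plus intel_dp_start_link_train) whenever EQ stalls or clock recovery is lost, and a hard abort after five retrains. A standalone skeleton of that control flow; the two stubs stand in for the driver's DPCD link-status checks and are assumptions for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for intel_clock_recovery_ok()/intel_channel_eq_ok();
 * the real code reads DPCD link status, these just drive the skeleton. */
static bool clock_recovery_ok(void)	{ return true; }
static bool channel_eq_ok(int tries)	{ return tries >= 3; }

int main(void)
{
	int tries = 0, cr_tries = 0;

	for (;;) {
		if (cr_tries > 5) {		/* give up for good */
			puts("failed to train DP, aborting");
			return 1;
		}
		if (!clock_recovery_ok()) {	/* CR lost: retrain from scratch */
			cr_tries++;
			continue;
		}
		if (channel_eq_ok(tries)) {
			puts("channel EQ done");
			return 0;
		}
		if (tries > 5) {		/* EQ stuck: redo clock recovery */
			tries = 0;
			cr_tries++;
			continue;
		}
		tries++;			/* adjust levels, try EQ again */
	}
}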
@@ -257,6 +257,9 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
 extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
 extern u32 intel_panel_get_backlight(struct drm_device *dev);
 extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
+extern void intel_panel_setup_backlight(struct drm_device *dev);
+extern void intel_panel_enable_backlight(struct drm_device *dev);
+extern void intel_panel_disable_backlight(struct drm_device *dev);

 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
...
@@ -62,6 +62,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 			  struct drm_fb_helper_surface_size *sizes)
 {
 	struct drm_device *dev = ifbdev->helper.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd mode_cmd;

@@ -77,7 +78,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	mode_cmd.height = sizes->surface_height;

 	mode_cmd.bpp = sizes->surface_bpp;
-	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
+	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
 	mode_cmd.depth = sizes->surface_depth;

 	size = mode_cmd.pitch * mode_cmd.height;

@@ -120,6 +121,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &intelfb_ops;

+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
 	/* setup aperture base/size for vesafb takeover */
 	info->apertures = alloc_apertures(1);
 	if (!info->apertures) {

@@ -127,10 +133,8 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 		goto out_unpin;
 	}
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
-	if (!IS_GEN2(dev))
-		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
-	else
-		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+	info->apertures->ranges[0].size =
+		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

 	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
 	info->fix.smem_len = size;

@@ -140,12 +144,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 		ret = -ENOSPC;
 		goto out_unpin;
 	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unpin;
-	}

 	info->screen_size = size;

 //	memset(info->screen_base, 0, size);
...
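The one-character pitch fix replaces a rounding that only happens to work for 8/16/24/32 bpp with a true ceiling division: (bpp + 1) / 8 under-counts bytes per pixel for depths such as 10 or 30 bpp, while (bpp + 7) / 8 is correct for any bpp. A standalone comparison, with ALIGN reproduced in the kernel's power-of-two form:

#include <stdio.h>

/* Power-of-two alignment, as the kernel's ALIGN() does for a = 64. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int width = 1366;

	for (unsigned int bpp = 8; bpp <= 32; bpp++) {
		unsigned int old_cpp = (bpp + 1) / 8; /* buggy rounding */
		unsigned int new_cpp = (bpp + 7) / 8; /* ceiling division */

		if (old_cpp != new_cpp)
			printf("bpp=%2u: old pitch %5u, fixed pitch %5u\n",
			       bpp,
			       ALIGN(width * old_cpp, 64u),
			       ALIGN(width * new_cpp, 64u));
	}
	return 0;
}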
@@ -106,7 +106,7 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
 	POSTING_READ(lvds_reg);

-	intel_panel_set_backlight(dev, dev_priv->backlight_level);
+	intel_panel_enable_backlight(dev);
 }

 static void intel_lvds_disable(struct intel_lvds *intel_lvds)

@@ -123,8 +123,7 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
 		lvds_reg = LVDS;
 	}

-	dev_priv->backlight_level = intel_panel_get_backlight(dev);
-	intel_panel_set_backlight(dev, 0);
+	intel_panel_disable_backlight(dev);

 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);

@@ -375,6 +374,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	}

 out:
+	if ((pfit_control & PFIT_ENABLE) == 0) {
+		pfit_control = 0;
+		pfit_pgm_ratios = 0;
+	}
 	if (pfit_control != intel_lvds->pfit_control ||
 	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
 		intel_lvds->pfit_control = pfit_control;

@@ -398,8 +401,6 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);

-	dev_priv->backlight_level = intel_panel_get_backlight(dev);
-
 	/* We try to do the minimum that is necessary in order to unlock
 	 * the registers for mode setting.
 	 *

@@ -430,9 +431,6 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);

-	if (dev_priv->backlight_level == 0)
-		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
-
 	/* Undo any unlocking done in prepare to prevent accidental
 	 * adjustment of the registers.
 	 */
...
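The new block in intel_lvds_mode_fixup() zeroes both panel-fitter values whenever PFIT_ENABLE is clear, since the ratio bits are don't-cares with the fitter off; that keeps the dirty-check that follows from re-committing hardware state that differs only in meaningless bits. A standalone sketch of the idea; the names are illustrative, and PFIT_ENABLE is bit 31 as in i915_reg.h:

#include <stdbool.h>
#include <stdio.h>

#define PFIT_ENABLE	(1u << 31)

static unsigned int cached_ctl, cached_ratios;

/* Illustrative: zero the don't-care ratio bits when the fitter is off,
 * then only report "dirty" if the effective state really changed. */
static bool pfit_update(unsigned int ctl, unsigned int ratios)
{
	if ((ctl & PFIT_ENABLE) == 0) {
		ctl = 0;
		ratios = 0;
	}
	if (ctl == cached_ctl && ratios == cached_ratios)
		return false;
	cached_ctl = ctl;
	cached_ratios = ratios;
	return true;
}

int main(void)
{
	/* fitter disabled, but stale ratios left over from a previous mode */
	printf("dirty: %d\n", pfit_update(0, 0x00200020)); /* 0: no update */
	return 0;
}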
@@ -250,3 +250,34 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
 	tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
+
+void intel_panel_disable_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->backlight_enabled) {
+		dev_priv->backlight_level = intel_panel_get_backlight(dev);
+		dev_priv->backlight_enabled = false;
+	}
+
+	intel_panel_set_backlight(dev, 0);
+}
+
+void intel_panel_enable_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->backlight_level == 0)
+		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+	intel_panel_set_backlight(dev, dev_priv->backlight_level);
+	dev_priv->backlight_enabled = true;
+}
+
+void intel_panel_setup_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+	dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
+}
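These three helpers centralize the level bookkeeping that intel_lvds.c previously did by hand: disable saves the current duty cycle before forcing it to zero, enable restores the saved level and falls back to the maximum if the panel was never lit, and setup seeds that state at load time. A standalone model of the state machine; the globals stand in for the drm_i915_private fields and hardware access is faked:

#include <stdbool.h>
#include <stdio.h>

/* Globals standing in for dev_priv->backlight_level/backlight_enabled;
 * hw_duty fakes the BLC_PWM_CTL duty-cycle field. */
static unsigned int backlight_level, hw_duty;
static bool backlight_enabled;

static unsigned int get_max_backlight(void)	{ return 255; }
static unsigned int get_backlight(void)		{ return hw_duty; }
static void set_backlight(unsigned int level)	{ hw_duty = level; }

static void panel_disable_backlight(void)
{
	if (backlight_enabled) {
		backlight_level = get_backlight(); /* remember user level */
		backlight_enabled = false;
	}
	set_backlight(0);
}

static void panel_enable_backlight(void)
{
	if (backlight_level == 0) /* never lit: fall back to maximum */
		backlight_level = get_max_backlight();
	set_backlight(backlight_level);
	backlight_enabled = true;
}

int main(void)
{
	set_backlight(180);
	backlight_level = 180;
	backlight_enabled = true;

	panel_disable_backlight();	/* modeset: panel off, 180 saved */
	panel_enable_backlight();	/* panel back on at 180, not 255 */
	printf("duty = %u\n", hw_duty);
	return 0;
}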
@@ -16,21 +16,24 @@ struct intel_hw_status_page {

 #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)

-#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
-#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
-#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
-#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
-#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
-#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
-#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))
+#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base))

 struct intel_ring_buffer {
 	const char	*name;

@@ -49,12 +52,15 @@ struct intel_ring_buffer {
 	u32		tail;
 	int		space;
 	int		size;
+	int		effective_size;
 	struct intel_hw_status_page status_page;

+	spinlock_t	irq_lock;
+	u32		irq_refcount;
+	u32		irq_mask;
 	u32		irq_seqno;	/* last seq seem at irq time */
 	u32		waiting_seqno;
 	u32		sync_seqno[I915_NUM_RINGS-1];
-	atomic_t	irq_refcount;
 	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
 	void		(*irq_put)(struct intel_ring_buffer *ring);

@@ -62,9 +68,9 @@ struct intel_ring_buffer {
 	void		(*write_tail)(struct intel_ring_buffer *ring,
 				      u32 value);
-	void		(*flush)(struct intel_ring_buffer *ring,
-				 u32 invalidate_domains,
-				 u32 flush_domains);
+	int __must_check (*flush)(struct intel_ring_buffer *ring,
+				  u32 invalidate_domains,
+				  u32 flush_domains);
 	int		(*add_request)(struct intel_ring_buffer *ring,
 				       u32 *seqno);
 	u32		(*get_seqno)(struct intel_ring_buffer *ring);
...
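Changing flush from void to int __must_check means a failed ring flush can no longer be silently dropped; every caller now has to propagate the error (this is the "Propagate error from flushing the ring" change in the merge). A standalone sketch of the resulting calling convention, with the kernel-only __must_check annotation omitted:

#include <errno.h>
#include <stdio.h>

struct ring {
	int (*flush)(struct ring *ring,
		     unsigned int invalidate_domains,
		     unsigned int flush_domains);
};

static int failing_flush(struct ring *ring,
			 unsigned int invalidate_domains,
			 unsigned int flush_domains)
{
	(void)ring; (void)invalidate_domains; (void)flush_domains;
	return -EIO; /* e.g. the ring wedged mid-flush */
}

/* A caller now has to check and propagate the result. */
static int submit(struct ring *ring)
{
	int ret = ring->flush(ring, ~0u, ~0u);
	if (ret)
		return ret;
	/* ...emit commands only after a successful flush... */
	return 0;
}

int main(void)
{
	struct ring ring = { .flush = failing_flush };

	printf("submit: %d\n", submit(&ring)); /* -5 (-EIO) propagated */
	return 0;
}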
@@ -1024,9 +1024,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	if (!intel_sdvo_set_target_input(intel_sdvo))
 		return;

-	if (intel_sdvo->has_hdmi_monitor &&
-	    !intel_sdvo_set_avi_infoframe(intel_sdvo))
-		return;
+	if (intel_sdvo->has_hdmi_monitor) {
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+		intel_sdvo_set_colorimetry(intel_sdvo,
+					   SDVO_COLORIMETRY_RGB256);
+		intel_sdvo_set_avi_infoframe(intel_sdvo);
+	} else
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);

 	if (intel_sdvo->is_tv &&
 	    !intel_sdvo_set_tv_format(intel_sdvo))

@@ -1398,6 +1402,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)

 	intel_sdvo->attached_output = response;

+	intel_sdvo->has_hdmi_monitor = false;
+	intel_sdvo->has_hdmi_audio = false;
+
 	if ((intel_sdvo_connector->output_flag & response) == 0)
 		ret = connector_status_disconnected;
 	else if (response & SDVO_TMDS_MASK)

@@ -1922,20 +1929,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
 static bool
 intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 {
-	int is_hdmi;
-
-	if (!intel_sdvo_check_supp_encode(intel_sdvo))
-		return false;
-
-	if (!intel_sdvo_set_target_output(intel_sdvo,
-					  device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
-		return false;
-
-	is_hdmi = 0;
-	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
-		return false;
-
-	return !!is_hdmi;
+	return intel_sdvo_check_supp_encode(intel_sdvo);
 }

 static u8

@@ -2037,12 +2031,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 		connector->connector_type = DRM_MODE_CONNECTOR_DVID;

 	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
-		/* enable hdmi encoding mode if supported */
-		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
-		intel_sdvo_set_colorimetry(intel_sdvo,
-					   SDVO_COLORIMETRY_RGB256);
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
 		intel_sdvo->is_hdmi = true;
 	}
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
...
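Two related moves here: detect now clears has_hdmi_monitor/has_hdmi_audio before probing, and the HDMI-vs-DVI encode selection (plus colorimetry and AVI infoframe) happens at mode_set time rather than once at connector init. Together they make the encode mode follow whatever monitor is currently attached. A standalone sketch of that flow; the types and helper names are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct sdvo {
	bool has_hdmi_monitor;
};

/* Illustrative detect: reset stale state, then latch what is attached. */
static void sdvo_detect(struct sdvo *s, bool monitor_is_hdmi)
{
	s->has_hdmi_monitor = false;
	if (monitor_is_hdmi)
		s->has_hdmi_monitor = true;
}

/* Illustrative mode_set: pick the encode mode per modeset, not at init. */
static void sdvo_mode_set(const struct sdvo *s)
{
	if (s->has_hdmi_monitor)
		puts("SDVO_ENCODE_HDMI + RGB256 colorimetry + AVI infoframe");
	else
		puts("SDVO_ENCODE_DVI");
}

int main(void)
{
	struct sdvo s;

	sdvo_detect(&s, true);
	sdvo_mode_set(&s);	/* HDMI monitor attached */
	sdvo_detect(&s, false);
	sdvo_mode_set(&s);	/* swapped for a DVI monitor: no stale HDMI */
	return 0;
}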