Commit 269b62db authored by Dave Airlie

Merge branch 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:

" First -next pull for 3.7. Highlights:
- hsw hdmi improvements (Paulo)
- ips/rps locking rework and cleanups
- rc6 on ilk by default again
- hw context&dp&dpff support for hsw (Ben)
- GET_PARAM_HAS_SEMAPHORES (Chris)
- gen6+ pipe_control improvements (Chris)
- set_cacheing ioctl and assorted support code (Chris)
- cleanups around the busy/idle/pm code (Chris&me)
- flushing_list removal, hopefully for good (Chris)
- read_reg ioctl (Ben)
- support the ns2501 dvo (Thomas Richter)
- avoid the costly gen6+ "missed IRQ" workaround where we don't need a
  race-free seqno readback (Chris)
- various bits&pieces, mostly early patches from the modeset rework branch"

* 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel: (54 commits)
  drm/i915: don't grab dev->struct_mutex for userspace forcewake
  drm/i915: try harder to find WR PLL clock settings
  drm/i915: use the correct encoder type when comparing
  drm/i915: Lazily apply the SNB+ seqno w/a
  drm/i915: enable rc6 on ilk again
  drm/i915: fix up ilk drps/ips locking
  drm/i915: DE_PCU_EVENT irq is ilk-only
  drm/i915: kill dev_priv->mchdev_lock
  drm/i915: move all rps state into dev_priv->rps
  drm/i915: use mutex_lock_interruptible for debugfs files
  drm/i915: fixup up debugfs rps state handling
  drm/i915: properly guard ilk ips state
  drm/i915: add parentheses around PIXCLK_GATE definitions
  drm/i915: reindent Haswell register definitions
  drm/i915: completely reset the value of DDI_FUNC_CTL
  drm/i915: correctly set the DDI_FUNC_CTL bpc field
  drm/i915: set the DDI sync polarity bits
  drm/i915: fix pipe DDI mode select
  drm/i915: dump the device info
  drm/i915: fixup desired rps frequency computation
  ...
parents d9875690 a22ddff8
@@ -40,6 +40,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
	  dvo_ivch.o \
	  dvo_tfp410.o \
	  dvo_sil164.o \
+	  dvo_ns2501.o \
	  i915_gem_dmabuf.o
 i915-$(CONFIG_COMPAT) += i915_ioc32.o
...
@@ -140,5 +140,6 @@ extern struct intel_dvo_dev_ops ch7xxx_ops;
 extern struct intel_dvo_dev_ops ivch_ops;
 extern struct intel_dvo_dev_ops tfp410_ops;
 extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
 #endif /* _INTEL_DVO_H */
@@ -44,7 +44,6 @@
 enum {
	ACTIVE_LIST,
-	FLUSHING_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
 };
@@ -62,28 +61,11 @@ static int i915_capabilities(struct seq_file *m, void *data)
	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
-#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
-	B(is_mobile);
-	B(is_i85x);
-	B(is_i915g);
-	B(is_i945gm);
-	B(is_g33);
-	B(need_gfx_hws);
-	B(is_g4x);
-	B(is_pineview);
-	B(is_broadwater);
-	B(is_crestline);
-	B(has_fbc);
-	B(has_pipe_cxsr);
-	B(has_hotplug);
-	B(cursor_needs_physical);
-	B(has_overlay);
-	B(overlay_needs_physical);
-	B(supports_tv);
-	B(has_bsd_ring);
-	B(has_blt_ring);
-	B(has_llc);
-#undef B
+#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+#define DEV_INFO_SEP ;
+	DEV_INFO_FLAGS;
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP

	return 0;
 }
@@ -121,14 +103,15 @@ static const char *cache_level_str(int type)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
+	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
-		   obj->last_rendering_seqno,
+		   obj->last_read_seqno,
+		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
@@ -177,10 +160,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
-	case FLUSHING_LIST:
-		seq_printf(m, "Flushing:\n");
-		head = &dev_priv->mm.flushing_list;
-		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
@@ -238,7 +217,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
-	count_objects(&dev_priv->mm.flushing_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);
@@ -413,7 +391,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
 {
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %d\n",
-			   ring->name, ring->get_seqno(ring));
+			   ring->name, ring->get_seqno(ring, false));
	}
 }
@@ -630,12 +608,12 @@ static void print_error_buffers(struct seq_file *m,
	seq_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
-		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+		seq_printf(m, "  %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
-			   err->seqno,
+			   err->rseqno, err->wseqno,
			   pin_flag(err->pinned),
			   tiling_flag(err->tiling),
			   dirty_flag(err->dirty),
@@ -799,10 +777,14 @@ i915_error_state_write(struct file *filp,
	struct seq_file *m = filp->private_data;
	struct i915_error_state_file_priv *error_priv = m->private;
	struct drm_device *dev = error_priv->dev;
+	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

-	mutex_lock(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);
@@ -1292,7 +1274,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

-	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+	for (gpu_freq = dev_priv->rps.min_delay;
+	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1472,8 +1455,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

-	mutex_lock(&dev->struct_mutex);
	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
@@ -1674,7 +1661,7 @@ i915_ring_stop_write(struct file *filp,
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
-	int val = 0;
+	int val = 0, ret;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
@@ -1689,7 +1676,10 @@ i915_ring_stop_write(struct file *filp,
	DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);

-	mutex_lock(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
	dev_priv->stop_rings = val;
	mutex_unlock(&dev->struct_mutex);
@@ -1713,10 +1703,18 @@ i915_max_freq_read(struct file *filp,
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
-	int len;
+	int len, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

	len = snprintf(buf, sizeof(buf),
-		       "max freq: %d\n", dev_priv->max_delay * 50);
+		       "max freq: %d\n", dev_priv->rps.max_delay * 50);
+	mutex_unlock(&dev->struct_mutex);

	if (len > sizeof(buf))
		len = sizeof(buf);
@@ -1733,7 +1731,10 @@ i915_max_freq_write(struct file *filp,
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
-	int val = 1;
+	int val = 1, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
@@ -1748,12 +1749,17 @@ i915_max_freq_write(struct file *filp,
	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);

+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
-	dev_priv->max_delay = val / 50;
-
+	dev_priv->rps.max_delay = val / 50;
	gen6_set_rps(dev, val / 50);
+	mutex_unlock(&dev->struct_mutex);

	return cnt;
 }
@@ -1773,10 +1779,18 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
-	int len;
+	int len, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

	len = snprintf(buf, sizeof(buf),
-		       "min freq: %d\n", dev_priv->min_delay * 50);
+		       "min freq: %d\n", dev_priv->rps.min_delay * 50);
+	mutex_unlock(&dev->struct_mutex);

	if (len > sizeof(buf))
		len = sizeof(buf);
@@ -1791,7 +1805,10 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
-	int val = 1;
+	int val = 1, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
@@ -1806,12 +1823,17 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
	DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);

+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
-	dev_priv->min_delay = val / 50;
-
+	dev_priv->rps.min_delay = val / 50;
	gen6_set_rps(dev, val / 50);
+	mutex_unlock(&dev->struct_mutex);

	return cnt;
 }
@@ -1834,9 +1856,15 @@ i915_cache_sharing_read(struct file *filp,
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	u32 snpcr;
-	int len;
+	int len, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

-	mutex_lock(&dev_priv->dev->struct_mutex);
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1862,6 +1890,9 @@ i915_cache_sharing_write(struct file *filp,
	u32 snpcr;
	int val = 1;

+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;
@@ -1925,16 +1956,11 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
	gen6_gt_force_wake_get(dev_priv);
-	mutex_unlock(&dev->struct_mutex);

	return 0;
 }
@@ -1947,16 +1973,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
	if (INTEL_INFO(dev)->gen < 6)
		return 0;

-	/*
-	 * It's bad that we can potentially hang userspace if struct_mutex gets
-	 * forever stuck. However, if we cannot acquire this lock it means that
-	 * almost certainly the driver has hung, is not unload-able. Therefore
-	 * hanging here is probably a minor inconvenience not to be seen my
-	 * almost every user.
-	 */
-	mutex_lock(&dev->struct_mutex);
	gen6_gt_force_wake_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);

	return 0;
 }
@@ -2006,7 +2023,6 @@ static struct drm_info_list i915_debugfs_list[] = {
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
-	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
@@ -2067,6 +2083,7 @@ int i915_debugfs_init(struct drm_minor *minor)
				  &i915_cache_sharing_fops);
	if (ret)
		return ret;
+
	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_ring_stop",
				  &i915_ring_stop_fops);
...
@@ -1009,6 +1009,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
+	case I915_PARAM_HAS_SEMAPHORES:
+		value = i915_semaphore_is_enabled(dev);
+		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
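The kernel side of GET_PARAM_HAS_SEMAPHORES is just the three added lines above; for context, the userspace query is equally small. A minimal sketch, assuming libdrm include paths and an i915_drm.h new enough to define I915_PARAM_HAS_SEMAPHORES, with fd an already-open i915 DRM node:

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Sketch: returns non-zero if ring-to-ring semaphores are enabled. */
static int has_semaphores(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_HAS_SEMAPHORES;
	gp.value = &value;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0; /* older kernel: the parameter is unknown */
	return value;
}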
@@ -1425,6 +1428,21 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
	kfree(ap);
 }

+static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+{
+	const struct intel_device_info *info = dev_priv->info;
+
+#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
+#define DEV_INFO_SEP ,
+	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+			 info->gen,
+			 dev_priv->dev->pdev->device,
+			 DEV_INFO_FLAGS);
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1449,7 +1467,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

-
	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
@@ -1465,6 +1482,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	dev_priv->dev = dev;
	dev_priv->info = info;

+	i915_dump_device_info(dev_priv);
+
	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
@@ -1586,7 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->error_lock);
-	spin_lock_init(&dev_priv->rps_lock);
+	spin_lock_init(&dev_priv->rps.lock);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
		dev_priv->num_pipe = 3;
@@ -1835,6 +1854,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHEING, i915_gem_set_cacheing_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHEING, i915_gem_get_cacheing_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
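The two CACHEING entries wire up the set_cacheing ioctl from this series. A hedged sketch of the userspace side, assuming the uapi names of this point in history (struct drm_i915_gem_cacheing with handle/cacheing fields and the I915_CACHEING_* values; the spelling was only later renamed to "caching"):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Sketch: ask the kernel to treat a GEM buffer as LLC-cached. The
 * struct and ioctl names are assumptions based on this series' uapi. */
static int bo_set_cached(int fd, uint32_t handle)
{
	struct drm_i915_gem_cacheing arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.cacheing = I915_CACHEING_CACHED; /* maps to I915_CACHE_LLC */
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHEING, &arg);
}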
@@ -1857,6 +1878,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
 };

 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
...
@@ -1060,7 +1060,7 @@ static bool IS_DISPLAYREG(u32 reg)
	 * This should make it easier to transition modules over to the
	 * new register block scheme, since we can do it incrementally.
	 */
-	if (reg >= 0x180000)
+	if (reg >= VLV_DISPLAY_BASE)
		return false;

	if (reg >= RENDER_RING_BASE &&
@@ -1180,3 +1180,49 @@ __i915_write(16, w)
 __i915_write(32, l)
 __i915_write(64, q)
 #undef __i915_write
+
+static const struct register_whitelist {
+	uint64_t offset;
+	uint32_t size;
+	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+} whitelist[] = {
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+			void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_reg_read *reg = data;
+	struct register_whitelist const *entry = whitelist;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+		if (entry->offset == reg->offset &&
+		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(whitelist))
+		return -EINVAL;
+
+	switch (entry->size) {
+	case 8:
+		reg->val = I915_READ64(reg->offset);
+		break;
+	case 4:
+		reg->val = I915_READ(reg->offset);
+		break;
+	case 2:
+		reg->val = I915_READ16(reg->offset);
+		break;
+	case 1:
+		reg->val = I915_READ8(reg->offset);
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
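Only one register is whitelisted so far: the render ring's 64-bit timestamp. A hedged userspace sketch of reading it through the new ioctl — the 0x2358 offset is an assumption derived from the whitelist entry above, RING_TIMESTAMP(RENDER_RING_BASE), i.e. the render ring base 0x02000 plus 0x358:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Sketch: read the render ring timestamp via DRM_IOCTL_I915_REG_READ,
 * the only offset the whitelist accepts at this point. */
static int read_render_timestamp(int fd, uint64_t *ts)
{
	struct drm_i915_reg_read reg;

	memset(&reg, 0, sizeof(reg));
	reg.offset = 0x2358; /* assumed: RING_TIMESTAMP(RENDER_RING_BASE) */
	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg))
		return -1;
	*ts = reg.val;
	return 0;
}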
@@ -109,6 +109,7 @@ struct intel_pch_pll {
 #define WATCH_COHERENCY	0
 #define WATCH_LISTS	0
+#define WATCH_GTT	0

 #define I915_GEM_PHYS_CURSOR_0 1
 #define I915_GEM_PHYS_CURSOR_1 2
@@ -221,7 +222,7 @@ struct drm_i915_error_state {
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
-		u32 seqno;
+		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
@@ -248,7 +249,6 @@ struct drm_i915_display_funcs {
	void (*update_wm)(struct drm_device *dev);
	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
				 uint32_t sprite_width, int pixel_size);
-	void (*sanitize_pm)(struct drm_device *dev);
	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
				   struct drm_display_mode *mode);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
@@ -279,6 +279,32 @@ struct drm_i915_gt_funcs {
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
 };

+#define DEV_INFO_FLAGS \
+	DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
+	DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
+	DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
+	DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
+	DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_llc)
+
 struct intel_device_info {
	u8 gen;
	u8 is_mobile:1;
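DEV_INFO_FLAGS is an X-macro: the flag table is written once, and each expansion site supplies its own per-entry operation (DEV_INFO_FLAG) and separator (DEV_INFO_SEP). The debugfs hunk earlier turns it into one seq_printf() per flag; i915_dump_device_info() turns the same table into a comma-separated argument list feeding its run of %s conversions. A standalone sketch of the pattern, with a hypothetical two-flag table:

#include <stdio.h>

/* Hypothetical two-entry table; the real DEV_INFO_FLAGS lists all 24
 * intel_device_info flag bits. */
#define DEMO_FLAGS \
	DEMO_FLAG(is_mobile) DEMO_SEP \
	DEMO_FLAG(has_llc)

struct demo_info { int is_mobile, has_llc; };

int main(void)
{
	struct demo_info i = { .is_mobile = 1, .has_llc = 0 };
	const struct demo_info *info = &i;

	/* Site 1 (like i915_capabilities): one statement per flag,
	 * joined by ';'. */
#define DEMO_FLAG(x) printf(#x ": %s\n", info->x ? "yes" : "no")
#define DEMO_SEP ;
	DEMO_FLAGS;
#undef DEMO_FLAG
#undef DEMO_SEP

	/* Site 2 (like i915_dump_device_info): each flag becomes a
	 * "name-or-empty" string, joined by ',' so the whole table is
	 * an argument list for a single printf. */
#define DEMO_FLAG(x) info->x ? #x "," : ""
#define DEMO_SEP ,
	printf("flags=%s%s\n", DEMO_FLAGS);
#undef DEMO_FLAG
#undef DEMO_SEP
	return 0;
}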
@@ -695,17 +721,6 @@ typedef struct drm_i915_private {
	 */
	struct list_head active_list;

-	/**
-	 * List of objects which are not in the ringbuffer but which
-	 * still have a write_domain which needs to be flushed before
-	 * unbinding.
-	 *
-	 * last_rendering_seqno is 0 while an object is in this list.
-	 *
-	 * A reference is held on the buffer while on this list.
-	 */
-	struct list_head flushing_list;
-
	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
@@ -796,9 +811,6 @@ typedef struct drm_i915_private {
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS*/
	int lvds_downclock;
-	struct work_struct idle_work;
-	struct timer_list idle_timer;
-	bool busy;
	u16 orig_clock;
	int child_dev_num;
	struct child_device_config *child_dev;
@@ -807,9 +819,21 @@ typedef struct drm_i915_private {
	bool mchbar_need_disable;

-	struct work_struct rps_work;
-	spinlock_t rps_lock;
-	u32 pm_iir;
+	/* gen6+ rps state */
+	struct {
+		struct work_struct work;
+		u32 pm_iir;
+		/* lock - irqsave spinlock that protectects the work_struct and
+		 * pm_iir. */
+		spinlock_t lock;
+
+		/* The below variables an all the rps hw state are protected by
+		 * dev->struct mutext. */
+		u8 cur_delay;
+		u8 min_delay;
+		u8 max_delay;
+	} rps;

	u8 cur_delay;
	u8 min_delay;
@@ -826,7 +850,6 @@ typedef struct drm_i915_private {
	int c_m;
	int r_t;
	u8 corr;
-	spinlock_t *mchdev_lock;

	enum no_fbc_reason no_fbc_reason;
@@ -861,9 +884,9 @@ enum hdmi_force_audio {
 };

 enum i915_cache_level {
-	I915_CACHE_NONE,
+	I915_CACHE_NONE = 0,
	I915_CACHE_LLC,
-	I915_CACHE_LLC_MLC, /* gen6+ */
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };

 struct drm_i915_gem_object {
struct drm_i915_gem_object { struct drm_i915_gem_object {
...@@ -873,18 +896,16 @@ struct drm_i915_gem_object { ...@@ -873,18 +896,16 @@ struct drm_i915_gem_object {
struct drm_mm_node *gtt_space; struct drm_mm_node *gtt_space;
struct list_head gtt_list; struct list_head gtt_list;
/** This object's place on the active/flushing/inactive lists */ /** This object's place on the active/inactive lists */
struct list_head ring_list; struct list_head ring_list;
struct list_head mm_list; struct list_head mm_list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
/** This object's place in the batchbuffer or on the eviction list */ /** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list; struct list_head exec_list;
/** /**
* This is set if the object is on the active or flushing lists * This is set if the object is on the active lists (has pending
* (has pending rendering), and is not set if it's on inactive (ready * rendering and so a non-zero seqno), and is not set if it i s on
* to be unbound). * inactive (ready to be unbound) list.
*/ */
unsigned int active:1; unsigned int active:1;
@@ -894,12 +915,6 @@ struct drm_i915_gem_object {
	 */
	unsigned int dirty:1;

-	/**
-	 * This is set if the object has been written to since the last
-	 * GPU flush.
-	 */
-	unsigned int pending_gpu_write:1;
-
	/**
	 * Fence register bits (if any) for this object.  Will be set
	 * as needed when mapped into the GTT.
@@ -992,7 +1007,8 @@ struct drm_i915_gem_object {
	struct intel_ring_buffer *ring;

	/** Breadcrumb of last rendering to the buffer. */
-	uint32_t last_rendering_seqno;
+	uint32_t last_read_seqno;
+	uint32_t last_write_seqno;

	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;
@@ -1135,6 +1151,8 @@ struct drm_i915_file_private {

 #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)

+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
 #include "i915_trace.h"

 /**
@@ -1256,6 +1274,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
+int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file);
+int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file);
 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1274,9 +1296,6 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
-				     uint32_t invalidate_domains,
-				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1291,7 +1310,6 @@ void i915_gem_lastclose(struct drm_device *dev);
 int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
				  gfp_t gfpmask);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
@@ -1358,7 +1376,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct intel_ring_buffer *ring,
+int i915_add_request(struct intel_ring_buffer *ring,
		     struct drm_file *file,
		     struct drm_i915_gem_request *request);
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
@@ -1429,7 +1447,9 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
-					  unsigned alignment, bool mappable);
+					  unsigned alignment,
+					  unsigned cache_level,
+					  bool mappable);
 int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);

 /* i915_gem_stolen.c */
@@ -1529,6 +1549,8 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 extern int intel_enable_rc6(const struct drm_device *dev);
 extern bool i915_semaphore_is_enabled(struct drm_device *dev);

+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file);
+
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
...
@@ -97,8 +97,7 @@

 static struct i915_hw_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct drm_i915_gem_object *from_obj,
-		     struct i915_hw_context *to, u32 seqno);
+static int do_switch(struct i915_hw_context *to);

 static int get_context_size(struct drm_device *dev)
 {
@@ -113,6 +112,9 @@ static int get_context_size(struct drm_device *dev)
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
+		if (IS_HASWELL(dev))
+			ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	default:
@@ -220,19 +222,20 @@ static int create_default_context(struct drm_i915_private *dev_priv)
	 */
	dev_priv->ring[RCS].default_context = ctx;
	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
-	if (ret) {
-		do_destroy(ctx);
-		return ret;
-	}
+	if (ret)
+		goto err_destroy;
+
+	ret = do_switch(ctx);
+	if (ret)
+		goto err_unpin;

-	ret = do_switch(NULL, ctx, 0);
-	if (ret) {
-		i915_gem_object_unpin(ctx->obj);
-		do_destroy(ctx);
-	} else {
-		DRM_DEBUG_DRIVER("Default HW context loaded\n");
-	}
+	DRM_DEBUG_DRIVER("Default HW context loaded\n");
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(ctx->obj);
+err_destroy:
+	do_destroy(ctx);

	return ret;
 }
@@ -359,17 +362,18 @@ mi_set_context(struct intel_ring_buffer *ring,
	return ret;
 }

-static int do_switch(struct drm_i915_gem_object *from_obj,
-		     struct i915_hw_context *to,
-		     u32 seqno)
+static int do_switch(struct i915_hw_context *to)
 {
-	struct intel_ring_buffer *ring = NULL;
+	struct intel_ring_buffer *ring = to->ring;
+	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
	u32 hw_flags = 0;
	int ret;

-	BUG_ON(to == NULL);
	BUG_ON(from_obj != NULL && from_obj->pin_count == 0);

+	if (from_obj == to->obj)
+		return 0;
+
	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
	if (ret)
		return ret;
@@ -393,7 +397,6 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
	else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
		hw_flags |= MI_FORCE_RESTORE;

-	ring = to->ring;
	ret = mi_set_context(ring, to, hw_flags);
	if (ret) {
		i915_gem_object_unpin(to->obj);
* MI_SET_CONTEXT instead of when the next seqno has completed. * MI_SET_CONTEXT instead of when the next seqno has completed.
*/ */
if (from_obj != NULL) { if (from_obj != NULL) {
u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring, seqno); i915_gem_object_move_to_active(from_obj, ring, seqno);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
...@@ -417,7 +421,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj, ...@@ -417,7 +421,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
* swapped, but there is no way to do that yet. * swapped, but there is no way to do that yet.
*/ */
from_obj->dirty = 1; from_obj->dirty = 1;
BUG_ON(from_obj->ring != to->ring); BUG_ON(from_obj->ring != ring);
i915_gem_object_unpin(from_obj); i915_gem_object_unpin(from_obj);
drm_gem_object_unreference(&from_obj->base); drm_gem_object_unreference(&from_obj->base);
@@ -448,9 +452,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
		       int to_id)
 {
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct drm_i915_file_private *file_priv = NULL;
	struct i915_hw_context *to;
-	struct drm_i915_gem_object *from_obj = ring->last_context_obj;

	if (dev_priv->hw_contexts_disabled)
		return 0;
@@ -458,21 +460,18 @@ int i915_switch_context(struct intel_ring_buffer *ring,
	if (ring != &dev_priv->ring[RCS])
		return 0;

-	if (file)
-		file_priv = file->driver_priv;
-
	if (to_id == DEFAULT_CONTEXT_ID) {
		to = ring->default_context;
	} else {
-		to = i915_gem_context_get(file_priv, to_id);
+		if (file == NULL)
+			return -EINVAL;
+
+		to = i915_gem_context_get(file->driver_priv, to_id);
		if (to == NULL)
			return -ENOENT;
	}

-	if (from_obj == to->obj)
-		return 0;
-
-	return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
+	return do_switch(to);
 }

 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
...
@@ -44,7 +44,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)

 int
 i915_gem_evict_something(struct drm_device *dev, int min_size,
-			 unsigned alignment, bool mappable)
+			 unsigned alignment, unsigned cache_level,
+			 bool mappable)
 {
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
@@ -79,11 +80,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
	INIT_LIST_HEAD(&unwind_list);
	if (mappable)
		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
-					    min_size, alignment, 0,
+					    min_size, alignment, cache_level,
					    0, dev_priv->mm.gtt_mappable_end);
	else
		drm_mm_init_scan(&dev_priv->mm.gtt_space,
-				 min_size, alignment, 0);
+				 min_size, alignment, cache_level);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -93,23 +94,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		/* Does the object require an outstanding flush? */
-		if (obj->base.write_domain)
-			continue;
-
-		if (mark_free(obj, &unwind_list))
-			goto found;
-	}
-
-	/* Finally add anything with a pending flush (in order of retirement) */
-	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-		if (mark_free(obj, &unwind_list))
-			goto found;
-	}
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (!obj->base.write_domain)
-			continue;
-
		if (mark_free(obj, &unwind_list))
			goto found;
	}
@@ -172,7 +156,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
	int ret;

	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	if (lists_empty)
		return -ENOSPC;
@@ -189,8 +172,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
	i915_gem_retire_requests(dev);

-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list, mm_list) {
...
@@ -34,180 +34,6 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>

-struct change_domains {
-	uint32_t invalidate_domains;
-	uint32_t flush_domains;
-	uint32_t flush_rings;
-	uint32_t flips;
-};
-
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Mapped to GTT
- *	4. Read by GPU
- *	5. Unmapped from GTT
- *	6. Freed
- *
- *	Let's take these a step at a time
- *
- *	1. Allocated
- *		Pages allocated from the kernel may still have
- *		cache contents, so we set them to (CPU, CPU) always.
- *	2. Written by CPU (using pwrite)
- *		The pwrite function calls set_domain (CPU, CPU) and
- *		this function does nothing (as nothing changes)
- *	3. Mapped by GTT
- *		This function asserts that the object is not
- *		currently in any GPU-based read or write domains
- *	4. Read by GPU
- *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
- *		As write_domain is zero, this function adds in the
- *		current read domains (CPU+COMMAND, 0).
- *		flush_domains is set to CPU.
- *		invalidate_domains is set to COMMAND
- *		clflush is run to get data out of the CPU caches
- *		then i915_dev_set_domain calls i915_gem_flush to
- *		emit an MI_FLUSH and drm_agp_chipset_flush
- *	5. Unmapped from GTT
- *		i915_gem_object_unbind calls set_domain (CPU, CPU)
- *		flush_domains and invalidate_domains end up both zero
- *		so no flushing/invalidating happens
- *	6. Freed
- *		yay, done
- *
- * Case 2: The shared render buffer
- *
- *	1. Allocated
- *	2. Mapped to GTT
- *	3. Read/written by GPU
- *	4. set_domain to (CPU,CPU)
- *	5. Read/written by CPU
- *	6. Read/written by GPU
- *
- *	1. Allocated
- *		Same as last example, (CPU, CPU)
- *	2. Mapped to GTT
- *		Nothing changes (assertions find that it is not in the GPU)
- *	3. Read/written by GPU
- *		execbuffer calls set_domain (RENDER, RENDER)
- *		flush_domains gets CPU
- *		invalidate_domains gets GPU
- *		clflush (obj)
- *		MI_FLUSH and drm_agp_chipset_flush
- *	4. set_domain (CPU, CPU)
- *		flush_domains gets GPU
- *		invalidate_domains gets CPU
- *		wait_rendering (obj) to make sure all drawing is complete.
- *		This will include an MI_FLUSH to get the data from GPU
- *		to memory
- *		clflush (obj) to invalidate the CPU cache
- *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- *	5. Read/written by CPU
- *		cache lines are loaded and dirtied
- *	6. Read written by GPU
- *		Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Read by GPU
- *	4. Updated (written) by CPU again
- *	5. Read by GPU
- *
- *	1. Allocated
- *		(CPU, CPU)
- *	2. Written by CPU
- *		(CPU, CPU)
- *	3. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- *	4. Updated (written) by CPU again
- *		(CPU, CPU)
- *		flush_domains = 0 (no previous write domain)
- *		invalidate_domains = 0 (no new read domains)
- *	5. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
-				  struct intel_ring_buffer *ring,
-				  struct change_domains *cd)
-{
-	uint32_t invalidate_domains = 0, flush_domains = 0;
-
-	/*
-	 * If the object isn't moving to a new write domain,
-	 * let the object stay in multiple read domains
-	 */
-	if (obj->base.pending_write_domain == 0)
-		obj->base.pending_read_domains |= obj->base.read_domains;
-
-	/*
-	 * Flush the current write domain if
-	 * the new read domains don't match. Invalidate
-	 * any read domains which differ from the old
-	 * write domain
-	 */
-	if (obj->base.write_domain &&
-	    (((obj->base.write_domain != obj->base.pending_read_domains ||
-	       obj->ring != ring)) ||
-	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
-		flush_domains |= obj->base.write_domain;
-		invalidate_domains |=
-			obj->base.pending_read_domains & ~obj->base.write_domain;
-	}
-	/*
-	 * Invalidate any read caches which may have
-	 * stale data. That is, any new read domains.
-	 */
-	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
-		i915_gem_clflush_object(obj);
-
-	if (obj->base.pending_write_domain)
-		cd->flips |= atomic_read(&obj->pending_flip);
-
-	/* The actual obj->write_domain will be updated with
-	 * pending_write_domain after we emit the accumulated flush for all
-	 * of our domain changes in execbuffers (which clears objects'
-	 * write_domains).  So if we have a current write domain that we
-	 * aren't changing, set pending_write_domain to that.
-	 */
-	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
-		obj->base.pending_write_domain = obj->base.write_domain;
-
-	cd->invalidate_domains |= invalidate_domains;
-	cd->flush_domains |= flush_domains;
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= intel_ring_flag(obj->ring);
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= intel_ring_flag(ring);
-}
-
 struct eb_objects {
	int and;
	struct hlist_head buckets[0];
@@ -587,6 +413,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
		obj->base.pending_read_domains = 0;
		obj->base.pending_write_domain = 0;
+		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_objects, objects);
@@ -810,18 +637,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
	return ret;
 }

-static void
-i915_gem_execbuffer_flush(struct drm_device *dev,
-			  uint32_t invalidate_domains,
-			  uint32_t flush_domains)
-{
-	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		intel_gtt_chipset_flush();
-
-	if (flush_domains & I915_GEM_DOMAIN_GTT)
-		wmb();
-}
-
 static int
 i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
 {
@@ -854,48 +669,45 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
	return 0;
 }

 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
 {
	struct drm_i915_gem_object *obj;
-	struct change_domains cd;
+	uint32_t flush_domains = 0;
+	uint32_t flips = 0;
	int ret;

-	memset(&cd, 0, sizeof(cd));
-	list_for_each_entry(obj, objects, exec_list)
-		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
-
-	if (cd.invalidate_domains | cd.flush_domains) {
-		i915_gem_execbuffer_flush(ring->dev,
-					  cd.invalidate_domains,
-					  cd.flush_domains);
-	}
-
-	if (cd.flips) {
-		ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
+	list_for_each_entry(obj, objects, exec_list) {
+		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;
+
+		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+			i915_gem_clflush_object(obj);
+
+		if (obj->base.pending_write_domain)
+			flips |= atomic_read(&obj->pending_flip);
+
+		flush_domains |= obj->base.write_domain;
	}

-	list_for_each_entry(obj, objects, exec_list) {
-		ret = i915_gem_object_sync(obj, ring);
+	if (flips) {
+		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
		if (ret)
			return ret;
	}

+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		intel_gtt_chipset_flush();
+
+	if (flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
+
	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
-	ret = i915_gem_flush_ring(ring,
-				  I915_GEM_GPU_DOMAINS,
-				  ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
-	if (ret)
-		return ret;
-
-	ring->gpu_caches_dirty = false;
-	return 0;
+	return intel_ring_invalidate_all_caches(ring);
 }
static bool static bool
...@@ -946,7 +758,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, ...@@ -946,7 +758,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
u32 old_read = obj->base.read_domains; u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain; u32 old_write = obj->base.write_domain;
obj->base.read_domains = obj->base.pending_read_domains; obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain; obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access; obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
...@@ -954,17 +765,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, ...@@ -954,17 +765,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
i915_gem_object_move_to_active(obj, ring, seqno); i915_gem_object_move_to_active(obj, ring, seqno);
if (obj->base.write_domain) { if (obj->base.write_domain) {
obj->dirty = 1; obj->dirty = 1;
obj->pending_gpu_write = true; obj->last_write_seqno = seqno;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
if (obj->pin_count) /* check for potential scanout */ if (obj->pin_count) /* check for potential scanout */
intel_mark_busy(ring->dev, obj); intel_mark_fb_busy(obj);
} }
trace_i915_gem_object_change_domain(obj, old_read, old_write); trace_i915_gem_object_change_domain(obj, old_read, old_write);
} }
intel_mark_busy(ring->dev, NULL);
} }
static void static void
...@@ -972,16 +779,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, ...@@ -972,16 +779,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file, struct drm_file *file,
struct intel_ring_buffer *ring) struct intel_ring_buffer *ring)
 {
-	struct drm_i915_gem_request *request;
-
 	/* Unconditionally force add_request to emit a full flush. */
 	ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL || i915_add_request(ring, file, request)) {
-		kfree(request);
-	}
+	(void)i915_add_request(ring, file, NULL);
 }
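With the allocation removed from this helper, i915_add_request() apparently takes over allocating the request when the caller passes NULL, so a breadcrumb-only caller can ignore the result. A hypothetical userspace sketch of that optional-out-parameter API shape (add_request and struct request are stand-ins, not the driver's types):

#include <stdio.h>
#include <stdlib.h>

struct request { int seqno; };

/* Allocate internally; hand the request out only if the caller asked. */
static int add_request(struct request **out)
{
	struct request *rq = malloc(sizeof(*rq));

	if (rq == NULL)
		return -1;
	rq->seqno = 1;

	if (out != NULL)
		*out = rq;	/* caller wants to track the request */
	else
		free(rq);	/* fire-and-forget breadcrumb */
	return 0;
}

int main(void)
{
	(void)add_request(NULL);	/* breadcrumb only, as above */
	puts("request emitted");
	return 0;
}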
static int static int
@@ -423,6 +423,23 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
undo_idling(dev_priv, interruptible); undo_idling(dev_priv, interruptible);
} }
static void i915_gtt_color_adjust(struct drm_mm_node *node,
unsigned long color,
unsigned long *start,
unsigned long *end)
{
if (node->color != color)
*start += 4096;
if (!list_empty(&node->node_list)) {
node = list_entry(node->node_list.next,
struct drm_mm_node,
node_list);
if (node->allocated && node->color != color)
*end -= 4096;
}
}
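i915_gtt_color_adjust() implements drm_mm's color_adjust hook: when the hole being considered borders a node of a different color (here, cacheing mode), the usable range is pulled in by one 4096-byte guard page on that side, so differently-cached objects never touch. A minimal userspace sketch of the same adjustment, with a stand-in struct instead of drm_mm_node:

#include <stdio.h>

/* Stand-in for drm_mm_node: the node just before the hole under test. */
struct node {
	unsigned long color;		/* cacheing mode of this node */
	int has_next;
	unsigned long next_color;	/* color of the node after the hole */
};

static void color_adjust(const struct node *prev, unsigned long color,
			 unsigned long *start, unsigned long *end)
{
	if (prev->color != color)
		*start += 4096;		/* guard page after the previous node */
	if (prev->has_next && prev->next_color != color)
		*end -= 4096;		/* guard page before the next node */
}

int main(void)
{
	struct node prev = { .color = 0, .has_next = 1, .next_color = 0 };
	unsigned long start = 0x10000, end = 0x20000;

	color_adjust(&prev, 1, &start, &end);	/* allocating with color 1 */
	printf("usable hole: [%#lx, %#lx)\n", start, end);
	return 0;
}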
void i915_gem_init_global_gtt(struct drm_device *dev, void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start, unsigned long start,
unsigned long mappable_end, unsigned long mappable_end,
@@ -432,6 +449,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
/* Substract the guard page ... */ /* Substract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
dev_priv->mm.gtt_start = start; dev_priv->mm.gtt_start = start;
dev_priv->mm.gtt_mappable_end = mappable_end; dev_priv->mm.gtt_mappable_end = mappable_end;
@@ -296,11 +296,21 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev); drm_helper_hpd_irq_event(dev);
} }
-static void i915_handle_rps_change(struct drm_device *dev)
+/* defined intel_pm.c */
+extern spinlock_t mchdev_lock;
+
+static void ironlake_handle_rps_change(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 busy_up, busy_down, max_avg, min_avg;
-	u8 new_delay = dev_priv->cur_delay;
+	u8 new_delay;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mchdev_lock, flags);
+
+	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+	new_delay = dev_priv->cur_delay;
 
 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
busy_up = I915_READ(RCPREVBSYTUPAVG); busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -324,6 +334,8 @@ static void i915_handle_rps_change(struct drm_device *dev)
if (ironlake_set_drps(dev, new_delay)) if (ironlake_set_drps(dev, new_delay))
dev_priv->cur_delay = new_delay; dev_priv->cur_delay = new_delay;
spin_unlock_irqrestore(&mchdev_lock, flags);
return; return;
} }
@@ -335,7 +347,7 @@ static void notify_ring(struct drm_device *dev,
if (ring->obj == NULL) if (ring->obj == NULL)
return; return;
trace_i915_gem_request_complete(ring, ring->get_seqno(ring)); trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
wake_up_all(&ring->irq_queue); wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) { if (i915_enable_hangcheck) {
@@ -349,16 +361,16 @@ static void notify_ring(struct drm_device *dev,
static void gen6_pm_rps_work(struct work_struct *work) static void gen6_pm_rps_work(struct work_struct *work)
{ {
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps_work); rps.work);
u32 pm_iir, pm_imr; u32 pm_iir, pm_imr;
u8 new_delay; u8 new_delay;
spin_lock_irq(&dev_priv->rps_lock); spin_lock_irq(&dev_priv->rps.lock);
pm_iir = dev_priv->pm_iir; pm_iir = dev_priv->rps.pm_iir;
dev_priv->pm_iir = 0; dev_priv->rps.pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR); pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE(GEN6_PMIMR, 0); I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock); spin_unlock_irq(&dev_priv->rps.lock);
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return; return;
@@ -366,9 +378,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->dev->struct_mutex); mutex_lock(&dev_priv->dev->struct_mutex);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
new_delay = dev_priv->cur_delay + 1; new_delay = dev_priv->rps.cur_delay + 1;
else else
new_delay = dev_priv->cur_delay - 1; new_delay = dev_priv->rps.cur_delay - 1;
gen6_set_rps(dev_priv->dev, new_delay); gen6_set_rps(dev_priv->dev, new_delay);
@@ -444,7 +456,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long flags; unsigned long flags;
if (!IS_IVYBRIDGE(dev)) if (!HAS_L3_GPU_CACHE(dev))
return; return;
spin_lock_irqsave(&dev_priv->irq_lock, flags); spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -488,19 +500,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
* IIR bits should never already be set because IMR should * IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning * prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared * displays a case where we've unsafely cleared
* dev_priv->pm_iir. Although missing an interrupt of the same * dev_priv->rps.pm_iir. Although missing an interrupt of the same
* type is not a problem, it displays a problem in the logic. * type is not a problem, it displays a problem in the logic.
* *
* The mask bit in IMR is cleared by rps_work. * The mask bit in IMR is cleared by dev_priv->rps.work.
*/ */
spin_lock_irqsave(&dev_priv->rps_lock, flags); spin_lock_irqsave(&dev_priv->rps.lock, flags);
dev_priv->pm_iir |= pm_iir; dev_priv->rps.pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
POSTING_READ(GEN6_PMIMR); POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps_lock, flags); spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
queue_work(dev_priv->wq, &dev_priv->rps_work); queue_work(dev_priv->wq, &dev_priv->rps.work);
} }
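gen6_queue_rps_work() is the interrupt half of a classic IRQ-to-workqueue handoff: pending PM event bits are accumulated under dev_priv->rps.lock and the heavy lifting is deferred to dev_priv->rps.work, which later takes and clears the whole word under the same lock, so no event is lost and none is handled twice. A compilable sketch of the pattern, with a pthread mutex standing in for the irqsave spinlock:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t rps_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t pm_iir;	/* pending event bits, guarded by rps_lock */

static void queue_rps_work(uint32_t new_bits)	/* "irq" side */
{
	pthread_mutex_lock(&rps_lock);
	pm_iir |= new_bits;
	pthread_mutex_unlock(&rps_lock);
	/* real code: queue_work(dev_priv->wq, &dev_priv->rps.work); */
}

static void *rps_work(void *arg)	/* worker side */
{
	uint32_t iir;

	(void)arg;
	pthread_mutex_lock(&rps_lock);
	iir = pm_iir;	/* take everything that is pending ... */
	pm_iir = 0;	/* ... and mark it consumed atomically */
	pthread_mutex_unlock(&rps_lock);

	printf("handling events 0x%x outside the lock\n", iir);
	return NULL;
}

int main(void)
{
	pthread_t t;

	queue_rps_work(0x3);
	pthread_create(&t, NULL, rps_work, NULL);
	pthread_join(t, NULL);
	return 0;
}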
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
@@ -793,10 +805,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
ibx_irq_handler(dev, pch_iir); ibx_irq_handler(dev, pch_iir);
} }
-	if (de_iir & DE_PCU_EVENT) {
-		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
-		i915_handle_rps_change(dev);
-	}
+	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
+		ironlake_handle_rps_change(dev);
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir); gen6_queue_rps_work(dev_priv, pm_iir);
@@ -949,7 +959,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
{ {
err->size = obj->base.size; err->size = obj->base.size;
err->name = obj->base.name; err->name = obj->base.name;
err->seqno = obj->last_rendering_seqno; err->rseqno = obj->last_read_seqno;
err->wseqno = obj->last_write_seqno;
err->gtt_offset = obj->gtt_offset; err->gtt_offset = obj->gtt_offset;
err->read_domains = obj->base.read_domains; err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain; err->write_domain = obj->base.write_domain;
@@ -1039,12 +1050,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (!ring->get_seqno) if (!ring->get_seqno)
return NULL; return NULL;
seqno = ring->get_seqno(ring); seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring) if (obj->ring != ring)
continue; continue;
if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue; continue;
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -1093,7 +1104,7 @@ static void i915_record_ring_state(struct drm_device *dev,
error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
error->seqno[ring->id] = ring->get_seqno(ring); error->seqno[ring->id] = ring->get_seqno(ring, false);
error->acthd[ring->id] = intel_ring_get_active_head(ring); error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring); error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring); error->tail[ring->id] = I915_READ_TAIL(ring);
@@ -1590,7 +1601,8 @@ ring_last_seqno(struct intel_ring_buffer *ring)
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{ {
 	if (list_empty(&ring->request_list) ||
-	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+	    i915_seqno_passed(ring->get_seqno(ring, false),
+			      ring_last_seqno(ring))) {
/* Issue a wake-up to catch stuck h/w. */ /* Issue a wake-up to catch stuck h/w. */
if (waitqueue_active(&ring->irq_queue)) { if (waitqueue_active(&ring->irq_queue)) {
DRM_ERROR("Hangcheck timer elapsed... %s idle\n", DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
@@ -2647,7 +2659,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func); INIT_WORK(&dev_priv->error_work, i915_error_work_func);
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->driver->get_vblank_counter = i915_get_vblank_counter;
@@ -449,6 +449,7 @@
#define RING_ACTHD(base) ((base)+0x74) #define RING_ACTHD(base) ((base)+0x74)
#define RING_NOPID(base) ((base)+0x94) #define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8) #define RING_IMR(base) ((base)+0xa8)
#define RING_TIMESTAMP(base) ((base)+0x358)
#define TAIL_ADDR 0x001FFFF8 #define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000 #define HEAD_WRAP_ONE 0x00200000
@@ -528,6 +529,8 @@
#define GFX_PSMI_GRANULARITY (1<<10) #define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9) #define GFX_PPGTT_ENABLE (1<<9)
#define VLV_DISPLAY_BASE 0x180000
#define SCPD0 0x0209c /* 915+ only */ #define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0 #define IER 0x020a0
#define IIR 0x020a4 #define IIR 0x020a4
@@ -1495,6 +1498,14 @@
GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_GT1_SIZE(ctx_reg) + \ GEN7_CXT_GT1_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg)) GEN7_CXT_VFSTATE_SIZE(ctx_reg))
#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
HSW_CXT_RING_SIZE(ctx_reg) + \
HSW_CXT_RENDER_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
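The HSW_CXT_* macros above decode packed size fields out of the context-size register: each field is the register shifted down and masked, and the total adds the gen7 VF state size on top. A quick standalone check of that decoding; the register value below is made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define HSW_CXT_POWER_SIZE(reg)  (((reg) >> 26) & 0x3f)
#define HSW_CXT_RING_SIZE(reg)   (((reg) >> 23) & 0x7)
#define HSW_CXT_RENDER_SIZE(reg) (((reg) >> 15) & 0xff)

int main(void)
{
	uint32_t reg = 0x2a1f8000;	/* arbitrary example value */

	printf("power=%u ring=%u render=%u\n",
	       HSW_CXT_POWER_SIZE(reg),
	       HSW_CXT_RING_SIZE(reg),
	       HSW_CXT_RENDER_SIZE(reg));
	return 0;
}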
/* /*
* Overlay regs * Overlay regs
@@ -1548,12 +1559,35 @@
/* VGA port control */ /* VGA port control */
#define ADPA 0x61100 #define ADPA 0x61100
#define PCH_ADPA 0xe1100
#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
#define ADPA_DAC_ENABLE (1<<31) #define ADPA_DAC_ENABLE (1<<31)
#define ADPA_DAC_DISABLE 0 #define ADPA_DAC_DISABLE 0
#define ADPA_PIPE_SELECT_MASK (1<<30) #define ADPA_PIPE_SELECT_MASK (1<<30)
#define ADPA_PIPE_A_SELECT 0 #define ADPA_PIPE_A_SELECT 0
#define ADPA_PIPE_B_SELECT (1<<30) #define ADPA_PIPE_B_SELECT (1<<30)
#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) #define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
/* CPT uses bits 29:30 for pch transcoder select */
#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15) #define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0 #define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11) #define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -3888,31 +3922,6 @@
#define FDI_PLL_CTL_1 0xfe000 #define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004 #define FDI_PLL_CTL_2 0xfe004
/* CRT */
#define PCH_ADPA 0xe1100
#define ADPA_TRANS_SELECT_MASK (1<<30)
#define ADPA_TRANS_A_SELECT 0
#define ADPA_TRANS_B_SELECT (1<<30)
#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
/* or SDVOB */ /* or SDVOB */
#define HDMIB 0xe1140 #define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31) #define PORT_ENABLE (1 << 31)
@@ -4286,22 +4295,25 @@
#define PIPE_DDI_FUNC_CTL_B 0x61400 #define PIPE_DDI_FUNC_CTL_B 0x61400
#define PIPE_DDI_FUNC_CTL_C 0x62400 #define PIPE_DDI_FUNC_CTL_C 0x62400
#define PIPE_DDI_FUNC_CTL_EDP 0x6F400 #define PIPE_DDI_FUNC_CTL_EDP 0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
-				 PIPE_DDI_FUNC_CTL_A, \
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
 				 PIPE_DDI_FUNC_CTL_B)
#define PIPE_DDI_FUNC_ENABLE (1<<31) #define PIPE_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define PIPE_DDI_PORT_MASK (7<<28) #define PIPE_DDI_PORT_MASK (7<<28)
#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) #define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) #define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
#define PIPE_DDI_MODE_SELECT_DVI (1<<24) #define PIPE_DDI_MODE_SELECT_DVI (1<<24)
#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) #define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) #define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
#define PIPE_DDI_MODE_SELECT_FDI (4<<24) #define PIPE_DDI_MODE_SELECT_FDI (4<<24)
#define PIPE_DDI_BPC_MASK (7<<20)
#define PIPE_DDI_BPC_8 (0<<20) #define PIPE_DDI_BPC_8 (0<<20)
#define PIPE_DDI_BPC_10 (1<<20) #define PIPE_DDI_BPC_10 (1<<20)
#define PIPE_DDI_BPC_6 (2<<20) #define PIPE_DDI_BPC_6 (2<<20)
#define PIPE_DDI_BPC_12 (3<<20) #define PIPE_DDI_BPC_12 (3<<20)
#define PIPE_DDI_PVSYNC (1<<17)
#define PIPE_DDI_PHSYNC (1<<16)
#define PIPE_DDI_BFI_ENABLE (1<<4) #define PIPE_DDI_BFI_ENABLE (1<<4)
#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) #define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
#define PIPE_DDI_PORT_WIDTH_X2 (1<<1) #define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
@@ -4310,9 +4322,7 @@
/* DisplayPort Transport Control */ /* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040 #define DP_TP_CTL_A 0x64040
#define DP_TP_CTL_B 0x64140 #define DP_TP_CTL_B 0x64140
-#define DP_TP_CTL(port) _PORT(port, \
-			      DP_TP_CTL_A, \
-			      DP_TP_CTL_B)
+#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
#define DP_TP_CTL_ENABLE (1<<31) #define DP_TP_CTL_ENABLE (1<<31)
#define DP_TP_CTL_MODE_SST (0<<27) #define DP_TP_CTL_MODE_SST (0<<27)
#define DP_TP_CTL_MODE_MST (1<<27) #define DP_TP_CTL_MODE_MST (1<<27)
@@ -4326,17 +4336,13 @@
/* DisplayPort Transport Status */ /* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044 #define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144 #define DP_TP_STATUS_B 0x64144
-#define DP_TP_STATUS(port) _PORT(port, \
-				 DP_TP_STATUS_A, \
-				 DP_TP_STATUS_B)
+#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
/* DDI Buffer Control */ /* DDI Buffer Control */
#define DDI_BUF_CTL_A 0x64000 #define DDI_BUF_CTL_A 0x64000
#define DDI_BUF_CTL_B 0x64100 #define DDI_BUF_CTL_B 0x64100
-#define DDI_BUF_CTL(port) _PORT(port, \
-				DDI_BUF_CTL_A, \
-				DDI_BUF_CTL_B)
+#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31) #define DDI_BUF_CTL_ENABLE (1<<31)
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ #define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
@@ -4357,9 +4363,7 @@
/* DDI Buffer Translations */ /* DDI Buffer Translations */
#define DDI_BUF_TRANS_A 0x64E00 #define DDI_BUF_TRANS_A 0x64E00
#define DDI_BUF_TRANS_B 0x64E60 #define DDI_BUF_TRANS_B 0x64E60
-#define DDI_BUF_TRANS(port) _PORT(port, \
-				  DDI_BUF_TRANS_A, \
-				  DDI_BUF_TRANS_B)
+#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
/* Sideband Interface (SBI) is programmed indirectly, via /* Sideband Interface (SBI) is programmed indirectly, via
* SBI_ADDR, which contains the register offset; and SBI_DATA, * SBI_ADDR, which contains the register offset; and SBI_DATA,
@@ -4391,8 +4395,8 @@
/* LPT PIXCLK_GATE */ /* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020 #define PIXCLK_GATE 0xC6020
-#define PIXCLK_GATE_UNGATE	1<<0
-#define PIXCLK_GATE_GATE	0<<0
+#define PIXCLK_GATE_UNGATE	(1<<0)
+#define PIXCLK_GATE_GATE	(0<<0)
/* SPLL */ /* SPLL */
#define SPLL_CTL 0x46020 #define SPLL_CTL 0x46020
@@ -4417,9 +4421,7 @@
/* Port clock selection */ /* Port clock selection */
#define PORT_CLK_SEL_A 0x46100 #define PORT_CLK_SEL_A 0x46100
#define PORT_CLK_SEL_B 0x46104 #define PORT_CLK_SEL_B 0x46104
-#define PORT_CLK_SEL(port) _PORT(port, \
-				 PORT_CLK_SEL_A, \
-				 PORT_CLK_SEL_B)
+#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
#define PORT_CLK_SEL_LCPLL_2700 (0<<29) #define PORT_CLK_SEL_LCPLL_2700 (0<<29)
#define PORT_CLK_SEL_LCPLL_1350 (1<<29) #define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29) #define PORT_CLK_SEL_LCPLL_810 (2<<29)
@@ -4430,9 +4432,7 @@
/* Pipe clock selection */ /* Pipe clock selection */
#define PIPE_CLK_SEL_A 0x46140 #define PIPE_CLK_SEL_A 0x46140
#define PIPE_CLK_SEL_B 0x46144 #define PIPE_CLK_SEL_B 0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
-				 PIPE_CLK_SEL_A, \
-				 PIPE_CLK_SEL_B)
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
/* For each pipe, we need to select the corresponding port clock */ /* For each pipe, we need to select the corresponding port clock */
#define PIPE_CLK_SEL_DISABLED (0x0<<29) #define PIPE_CLK_SEL_DISABLED (0x0<<29)
#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) #define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
@@ -4447,8 +4447,7 @@
/* Pipe WM_LINETIME - watermark line time */ /* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270 #define PIPE_WM_LINETIME_A 0x45270
#define PIPE_WM_LINETIME_B 0x45274 #define PIPE_WM_LINETIME_B 0x45274
-#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
-				       PIPE_WM_LINETIME_A, \
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
 				       PIPE_WM_LINETIME_B)
#define PIPE_WM_LINETIME_MASK (0x1ff) #define PIPE_WM_LINETIME_MASK (0x1ff)
#define PIPE_WM_LINETIME_TIME(x) ((x)) #define PIPE_WM_LINETIME_TIME(x) ((x))
@@ -213,7 +213,7 @@ void i915_setup_sysfs(struct drm_device *dev)
DRM_ERROR("RC6 residency sysfs setup failed\n"); DRM_ERROR("RC6 residency sysfs setup failed\n");
} }
if (IS_IVYBRIDGE(dev)) { if (HAS_L3_GPU_CACHE(dev)) {
ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
if (ret) if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n"); DRM_ERROR("l3 parity sysfs setup failed\n");
@@ -47,6 +47,7 @@
struct intel_crt { struct intel_crt {
struct intel_encoder base; struct intel_encoder base;
bool force_hotplug_required; bool force_hotplug_required;
u32 adpa_reg;
}; };
static struct intel_crt *intel_attached_crt(struct drm_connector *connector) static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
@@ -55,6 +56,11 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
struct intel_crt, base); struct intel_crt, base);
} }
static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_crt, base);
}
static void pch_crt_dpms(struct drm_encoder *encoder, int mode) static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
{ {
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
@@ -145,19 +151,15 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc; struct drm_crtc *crtc = encoder->crtc;
struct intel_crt *crt =
intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_md_reg; int dpll_md_reg;
u32 adpa, dpll_md; u32 adpa, dpll_md;
u32 adpa_reg;
dpll_md_reg = DPLL_MD(intel_crtc->pipe); dpll_md_reg = DPLL_MD(intel_crtc->pipe);
if (HAS_PCH_SPLIT(dev))
adpa_reg = PCH_ADPA;
else
adpa_reg = ADPA;
/* /*
* Disable separate mode multiplier used when cloning SDVO to CRT * Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning * XXX this needs to be adjusted when we really are cloning
@@ -185,7 +187,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (!HAS_PCH_SPLIT(dev)) if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
I915_WRITE(adpa_reg, adpa); I915_WRITE(crt->adpa_reg, adpa);
} }
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -658,9 +660,7 @@ void intel_crt_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, &crt->base); intel_connector_attach_encoder(intel_connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG; crt->base.type = INTEL_OUTPUT_ANALOG;
-	crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
-				1 << INTEL_ANALOG_CLONE_BIT |
-				1 << INTEL_SDVO_LVDS_CLONE_BIT);
+	crt->base.cloneable = true;
if (IS_HASWELL(dev)) if (IS_HASWELL(dev))
crt->base.crtc_mask = (1 << 0); crt->base.crtc_mask = (1 << 0);
else else
@@ -677,6 +677,13 @@ void intel_crt_init(struct drm_device *dev)
else else
encoder_helper_funcs = &gmch_encoder_funcs; encoder_helper_funcs = &gmch_encoder_funcs;
if (HAS_PCH_SPLIT(dev))
crt->adpa_reg = PCH_ADPA;
else if (IS_VALLEYVIEW(dev))
crt->adpa_reg = VLV_ADPA;
else
crt->adpa_reg = ADPA;
drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs); drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
@@ -250,7 +250,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
case PORT_B: case PORT_B:
case PORT_C: case PORT_C:
case PORT_D: case PORT_D:
intel_hdmi_init(dev, DDI_BUF_CTL(port)); intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
break; break;
default: default:
DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n", DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
@@ -267,7 +267,8 @@ struct wrpll_tmds_clock {
u16 r2; /* Reference divider */ u16 r2; /* Reference divider */
}; };
/* Table of matching values for WRPLL clocks programming for each frequency */ /* Table of matching values for WRPLL clocks programming for each frequency.
* The code assumes this table is sorted. */
static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{19750, 38, 25, 18}, {19750, 38, 25, 18},
{20000, 48, 32, 18}, {20000, 48, 32, 18},
@@ -277,7 +278,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{23000, 36, 23, 15}, {23000, 36, 23, 15},
{23500, 40, 40, 23}, {23500, 40, 40, 23},
{23750, 26, 16, 14}, {23750, 26, 16, 14},
{23750, 26, 16, 14},
{24000, 36, 24, 15}, {24000, 36, 24, 15},
{25000, 36, 25, 15}, {25000, 36, 25, 15},
{25175, 26, 40, 33}, {25175, 26, 40, 33},
@@ -437,7 +437,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{108000, 8, 24, 15}, {108000, 8, 24, 15},
{108108, 8, 173, 108}, {108108, 8, 173, 108},
{109000, 6, 23, 19}, {109000, 6, 23, 19},
{109000, 6, 23, 19},
{110000, 6, 22, 18}, {110000, 6, 22, 18},
{110013, 6, 22, 18}, {110013, 6, 22, 18},
{110250, 8, 49, 30}, {110250, 8, 49, 30},
@@ -614,7 +613,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{218250, 4, 42, 26}, {218250, 4, 42, 26},
{218750, 4, 34, 21}, {218750, 4, 34, 21},
{219000, 4, 47, 29}, {219000, 4, 47, 29},
{219000, 4, 47, 29},
{220000, 4, 44, 27}, {220000, 4, 44, 27},
{220640, 4, 49, 30}, {220640, 4, 49, 30},
{220750, 4, 36, 22}, {220750, 4, 36, 22},
@@ -658,7 +656,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int port = intel_hdmi->ddi_port; int port = intel_hdmi->ddi_port;
int pipe = intel_crtc->pipe; int pipe = intel_crtc->pipe;
int p, n2, r2, valid=0; int p, n2, r2;
u32 temp, i; u32 temp, i;
/* On Haswell, we need to enable the clocks and prepare DDI function to /* On Haswell, we need to enable the clocks and prepare DDI function to
@@ -666,26 +664,23 @@
*/ */
DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
-	for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
-		if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
-			p = wrpll_tmds_clock_table[i].p;
-			n2 = wrpll_tmds_clock_table[i].n2;
-			r2 = wrpll_tmds_clock_table[i].r2;
-
-			DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
-				      crtc->mode.clock,
-				      p, n2, r2);
-
-			valid = 1;
-			break;
-		}
-	}
-
-	if (!valid) {
-		DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
-			  crtc->mode.clock);
-		return;
-	}
+	for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
+		if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
+			break;
+
+	if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
+		i--;
+
+	p = wrpll_tmds_clock_table[i].p;
+	n2 = wrpll_tmds_clock_table[i].n2;
+	r2 = wrpll_tmds_clock_table[i].r2;
+
+	if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
+		DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
+			 wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
+
+	DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+		      crtc->mode.clock, p, n2, r2);
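The new loop depends on the table being sorted by clock: it takes the first entry whose clock is greater than or equal to the requested one, and clamps to the last (largest) entry when the request exceeds the table. A self-contained demo of that nearest-match lookup on a toy table:

#include <stdio.h>

struct entry { int clock, p, n2, r2; };

static const struct entry table[] = {	/* must stay sorted by clock */
	{ 19750, 38, 25, 18 },
	{ 20000, 48, 32, 18 },
	{ 24000, 36, 24, 15 },
};

static const struct entry *pick(int clock)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (clock <= table[i].clock)
			break;
	if (i == sizeof(table) / sizeof(table[0]))
		i--;	/* clamp: request above the table */
	return &table[i];
}

int main(void)
{
	const struct entry *e = pick(21000);

	printf("nearest >= 21000 is %d (p=%d n2=%d r2=%d)\n",
	       e->clock, e->p, e->n2, e->r2);
	return 0;
}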
/* Enable LCPLL if disabled */ /* Enable LCPLL if disabled */
temp = I915_READ(LCPLL_CTL); temp = I915_READ(LCPLL_CTL);
@@ -723,15 +718,35 @@
} }
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
-	temp = I915_READ(DDI_FUNC_CTL(pipe));
-	temp &= ~PIPE_DDI_PORT_MASK;
-	temp &= ~PIPE_DDI_BPC_12;
-	temp |= PIPE_DDI_SELECT_PORT(port) |
-		PIPE_DDI_MODE_SELECT_HDMI |
-		((intel_crtc->bpp > 24) ?
-			PIPE_DDI_BPC_12 :
-			PIPE_DDI_BPC_8) |
-		PIPE_DDI_FUNC_ENABLE;
+	temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
+
+	switch (intel_crtc->bpp) {
+	case 18:
+		temp |= PIPE_DDI_BPC_6;
+		break;
+	case 24:
+		temp |= PIPE_DDI_BPC_8;
+		break;
+	case 30:
+		temp |= PIPE_DDI_BPC_10;
+		break;
+	case 36:
+		temp |= PIPE_DDI_BPC_12;
+		break;
+	default:
+		WARN(1, "%d bpp unsupported by pipe DDI function\n",
+		     intel_crtc->bpp);
+	}
+
+	if (intel_hdmi->has_hdmi_sink)
+		temp |= PIPE_DDI_MODE_SELECT_HDMI;
+	else
+		temp |= PIPE_DDI_MODE_SELECT_DVI;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		temp |= PIPE_DDI_PVSYNC;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		temp |= PIPE_DDI_PHSYNC;
I915_WRITE(DDI_FUNC_CTL(pipe), temp); I915_WRITE(DDI_FUNC_CTL(pipe), temp);
@@ -36,42 +36,10 @@
#include "intel_drv.h" #include "intel_drv.h"
#include "i915_drm.h" #include "i915_drm.h"
#include "i915_drv.h" #include "i915_drv.h"
#include "drm_dp_helper.h"
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_LINK_STATUS_SIZE 6 #define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000) #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
#define DP_LINK_CONFIGURATION_SIZE 9
struct intel_dp {
struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
uint32_t color_range;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
struct edid *edid; /* cached EDID for eDP */
int edid_mode_count;
};
/** /**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH) * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct * @intel_dp: DP struct
@@ -1668,6 +1636,45 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret; int ret;
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
break;
case DP_TRAINING_PATTERN_1:
dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
break;
case DP_TRAINING_PATTERN_2:
dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
} else {
dp_reg_value &= ~DP_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
dp_reg_value |= DP_LINK_TRAIN_OFF;
break;
case DP_TRAINING_PATTERN_1:
dp_reg_value |= DP_LINK_TRAIN_PAT_1;
break;
case DP_TRAINING_PATTERN_2:
dp_reg_value |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
dp_reg_value |= DP_LINK_TRAIN_PAT_2;
break;
}
}
I915_WRITE(intel_dp->output_reg, dp_reg_value); I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg); POSTING_READ(intel_dp->output_reg);
@@ -1675,12 +1682,15 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_SET,
dp_train_pat); dp_train_pat);
if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
DP_TRAINING_PATTERN_DISABLE) {
ret = intel_dp_aux_native_write(intel_dp, ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET, DP_TRAINING_LANE0_SET,
intel_dp->train_set, intel_dp->train_set,
intel_dp->lane_count); intel_dp->lane_count);
if (ret != intel_dp->lane_count) if (ret != intel_dp->lane_count)
return false; return false;
}
return true; return true;
} }
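After this change intel_dp_set_link_train() is the single place that translates a DPCD training-pattern value into the right register bits for the two layouts (CPT PCH ports vs everything else), so callers stop open-coding the selection. A standalone sketch of that translation shape; the shift and bit values below are invented stand-ins, not the real register layout:

#include <stdint.h>
#include <stdio.h>

enum pattern { PAT_DISABLE, PAT_1, PAT_2 };

#define TRAIN_MASK      (3u << 28)
#define TRAIN_OFF       (0u << 28)
#define TRAIN_PAT_1     (1u << 28)
#define TRAIN_PAT_2     (2u << 28)
#define TRAIN_MASK_CPT  (7u << 8)
#define TRAIN_OFF_CPT   (0u << 8)
#define TRAIN_PAT_1_CPT (1u << 8)
#define TRAIN_PAT_2_CPT (2u << 8)

/* Clear the layout's training field, then set the requested pattern. */
static uint32_t set_pattern(uint32_t reg, enum pattern p, int is_cpt)
{
	if (is_cpt) {
		reg &= ~TRAIN_MASK_CPT;
		reg |= (p == PAT_DISABLE) ? TRAIN_OFF_CPT :
		       (p == PAT_1) ? TRAIN_PAT_1_CPT : TRAIN_PAT_2_CPT;
	} else {
		reg &= ~TRAIN_MASK;
		reg |= (p == PAT_DISABLE) ? TRAIN_OFF :
		       (p == PAT_1) ? TRAIN_PAT_1 : TRAIN_PAT_2;
	}
	return reg;
}

int main(void)
{
	printf("cpt pat1 bits: %#x\n", set_pattern(0, PAT_1, 1));
	printf("gmch pat1 bits: %#x\n", set_pattern(0, PAT_1, 0));
	return 0;
}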
@@ -1696,7 +1706,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t voltage; uint8_t voltage;
bool clock_recovery = false; bool clock_recovery = false;
int voltage_tries, loop_tries; int voltage_tries, loop_tries;
u32 reg;
uint32_t DP = intel_dp->DP; uint32_t DP = intel_dp->DP;
/* /*
@@ -1717,10 +1726,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP |= DP_PORT_EN; DP |= DP_PORT_EN;
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
memset(intel_dp->train_set, 0, 4); memset(intel_dp->train_set, 0, 4);
voltage = 0xff; voltage = 0xff;
voltage_tries = 0; voltage_tries = 0;
@@ -1744,12 +1749,7 @@
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
} }
-		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
-		else
-			reg = DP | DP_LINK_TRAIN_PAT_1;
-
-		if (!intel_dp_set_link_train(intel_dp, reg,
+		if (!intel_dp_set_link_train(intel_dp, DP,
 					     DP_TRAINING_PATTERN_1 |
 					     DP_LINK_SCRAMBLING_DISABLE))
 			break;
@@ -1804,10 +1804,8 @@ static void
intel_dp_complete_link_train(struct intel_dp *intel_dp) intel_dp_complete_link_train(struct intel_dp *intel_dp)
{ {
struct drm_device *dev = intel_dp->base.base.dev; struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool channel_eq = false; bool channel_eq = false;
int tries, cr_tries; int tries, cr_tries;
u32 reg;
uint32_t DP = intel_dp->DP; uint32_t DP = intel_dp->DP;
/* channel equalization */ /* channel equalization */
@@ -1836,13 +1834,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
} }
-		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
-		else
-			reg = DP | DP_LINK_TRAIN_PAT_2;
-
 		/* channel eq pattern */
-		if (!intel_dp_set_link_train(intel_dp, reg,
+		if (!intel_dp_set_link_train(intel_dp, DP,
 					     DP_TRAINING_PATTERN_2 |
 					     DP_LINK_SCRAMBLING_DISABLE))
 			break;
@@ -1877,15 +1870,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries; ++tries;
} }
-	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-		reg = DP | DP_LINK_TRAIN_OFF_CPT;
-	else
-		reg = DP | DP_LINK_TRAIN_OFF;
-
-	I915_WRITE(intel_dp->output_reg, reg);
-	POSTING_READ(intel_dp->output_reg);
-
-	intel_dp_aux_native_write_1(intel_dp,
-				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
} }
static void static void
@@ -2441,7 +2426,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
} }
void void
-intel_dp_init(struct drm_device *dev, int output_reg)
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector; struct drm_connector *connector;
@@ -2456,6 +2441,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
return; return;
intel_dp->output_reg = output_reg; intel_dp->output_reg = output_reg;
intel_dp->port = port;
intel_dp->dpms_mode = -1; intel_dp->dpms_mode = -1;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
@@ -2483,18 +2469,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
connector->polled = DRM_CONNECTOR_POLL_HPD; connector->polled = DRM_CONNECTOR_POLL_HPD;
-	if (output_reg == DP_B || output_reg == PCH_DP_B)
-		intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
-	else if (output_reg == DP_C || output_reg == PCH_DP_C)
-		intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
-	else if (output_reg == DP_D || output_reg == PCH_DP_D)
-		intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+	intel_encoder->cloneable = false;
 
-	if (is_edp(intel_dp)) {
-		intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
-		INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
-				  ironlake_panel_vdd_work);
-	}
+	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+			  ironlake_panel_vdd_work);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -2509,28 +2487,25 @@
drm_sysfs_connector_add(connector); drm_sysfs_connector_add(connector);
 	/* Set up the DDC bus. */
-	switch (output_reg) {
-	case DP_A:
+	switch (port) {
+	case PORT_A:
 		name = "DPDDC-A";
 		break;
-	case DP_B:
-	case PCH_DP_B:
-		dev_priv->hotplug_supported_mask |=
-			DPB_HOTPLUG_INT_STATUS;
+	case PORT_B:
+		dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
 		name = "DPDDC-B";
 		break;
-	case DP_C:
-	case PCH_DP_C:
-		dev_priv->hotplug_supported_mask |=
-			DPC_HOTPLUG_INT_STATUS;
+	case PORT_C:
+		dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
 		name = "DPDDC-C";
 		break;
-	case DP_D:
-	case PCH_DP_D:
-		dev_priv->hotplug_supported_mask |=
-			DPD_HOTPLUG_INT_STATUS;
+	case PORT_D:
+		dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
 		name = "DPDDC-D";
 		break;
+	default:
+		WARN(1, "Invalid port %c\n", port_name(port));
+		break;
 	}
intel_dp_i2c_init(intel_dp, intel_connector, name); intel_dp_i2c_init(intel_dp, intel_connector, name);
@@ -31,6 +31,7 @@
#include "drm_crtc.h" #include "drm_crtc.h"
#include "drm_crtc_helper.h" #include "drm_crtc_helper.h"
#include "drm_fb_helper.h" #include "drm_fb_helper.h"
#include "drm_dp_helper.h"
#define _wait_for(COND, MS, W) ({ \ #define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
@@ -90,25 +91,6 @@
#define INTEL_OUTPUT_DISPLAYPORT 7 #define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8 #define INTEL_OUTPUT_EDP 8
/* Intel Pipe Clone Bit */
#define INTEL_HDMIB_CLONE_BIT 1
#define INTEL_HDMIC_CLONE_BIT 2
#define INTEL_HDMID_CLONE_BIT 3
#define INTEL_HDMIE_CLONE_BIT 4
#define INTEL_HDMIF_CLONE_BIT 5
#define INTEL_SDVO_NON_TV_CLONE_BIT 6
#define INTEL_SDVO_TV_CLONE_BIT 7
#define INTEL_SDVO_LVDS_CLONE_BIT 8
#define INTEL_ANALOG_CLONE_BIT 9
#define INTEL_TV_CLONE_BIT 10
#define INTEL_DP_B_CLONE_BIT 11
#define INTEL_DP_C_CLONE_BIT 12
#define INTEL_DP_D_CLONE_BIT 13
#define INTEL_LVDS_CLONE_BIT 14
#define INTEL_DVO_TMDS_CLONE_BIT 15
#define INTEL_DVO_LVDS_CLONE_BIT 16
#define INTEL_EDP_CLONE_BIT 17
#define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1 #define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2 #define INTEL_DVO_CHIP_TMDS 2
@@ -153,9 +135,13 @@ struct intel_encoder {
struct drm_encoder base; struct drm_encoder base;
int type; int type;
bool needs_tv_clock; bool needs_tv_clock;
/*
* Intel hw has only one MUX where encoders could be clone, hence a
* simple flag is enough to compute the possible_clones mask.
*/
bool cloneable;
void (*hot_plug)(struct intel_encoder *); void (*hot_plug)(struct intel_encoder *);
int crtc_mask; int crtc_mask;
int clone_mask;
}; };
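Replacing the per-type clone_mask with one cloneable flag works because, per the comment above, the hardware has a single cloning MUX: two encoders may be cloned exactly when both are cloneable, so the possible_clones bitmask can be computed generically instead of hand-maintained. A small sketch of that computation (struct encoder is a stand-in):

#include <stdio.h>

struct encoder { int id; int cloneable; };

static unsigned int possible_clones(const struct encoder *enc,
				    const struct encoder *all, int n)
{
	unsigned int mask = 1u << enc->id;	/* an encoder can clone itself */
	int i;

	for (i = 0; i < n; i++)
		if (enc->cloneable && all[i].cloneable)
			mask |= 1u << all[i].id;
	return mask;
}

int main(void)
{
	struct encoder encs[] = { { 0, 1 }, { 1, 1 }, { 2, 0 } };

	printf("clones of encoder 0: %#x\n", possible_clones(&encs[0], encs, 3));
	printf("clones of encoder 2: %#x\n", possible_clones(&encs[2], encs, 3));
	return 0;
}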
struct intel_connector { struct intel_connector {
@@ -171,8 +157,6 @@ struct intel_crtc {
int dpms_mode; int dpms_mode;
bool active; /* is the crtc on? independent of the dpms mode */ bool active; /* is the crtc on? independent of the dpms mode */
bool primary_disabled; /* is the crtc obscured by a plane? */ bool primary_disabled; /* is the crtc obscured by a plane? */
bool busy; /* is scanout buffer being updated frequently? */
struct timer_list idle_timer;
bool lowfreq_avail; bool lowfreq_avail;
struct intel_overlay *overlay; struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work; struct intel_unpin_work *unpin_work;
@@ -311,6 +295,38 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode); struct drm_display_mode *adjusted_mode);
}; };
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_LINK_CONFIGURATION_SIZE 9
struct intel_dp {
struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
enum port port;
uint32_t color_range;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
struct edid *edid; /* cached EDID for eDP */
int edid_mode_count;
};
static inline struct drm_crtc * static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{ {
@@ -348,17 +364,21 @@ extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
extern void intel_crt_init(struct drm_device *dev); extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+extern void intel_hdmi_init(struct drm_device *dev,
+			    int sdvox_reg, enum port port);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob); bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev); extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev);
+extern void intel_mark_idle(struct drm_device *dev);
+extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
+extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
 extern bool intel_lvds_init(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+extern void intel_dp_init(struct drm_device *dev, int output_reg,
+			  enum port port);
void void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode); struct drm_display_mode *adjusted_mode);
@@ -371,8 +391,6 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane); enum plane plane);
void intel_sanitize_pm(struct drm_device *dev);
/* intel_panel.c */ /* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode); struct drm_display_mode *adjusted_mode);
@@ -37,6 +37,7 @@
#define SIL164_ADDR 0x38 #define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76 #define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38 #define TFP410_ADDR 0x38
#define NS2501_ADDR 0x38
static const struct intel_dvo_device intel_dvo_devices[] = { static const struct intel_dvo_device intel_dvo_devices[] = {
{ {
@@ -74,6 +75,13 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.slave_addr = 0x75, .slave_addr = 0x75,
.gpio = GMBUS_PORT_DPB, .gpio = GMBUS_PORT_DPB,
.dev_ops = &ch7017_ops, .dev_ops = &ch7017_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ns2501",
.dvo_reg = DVOC,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
} }
}; };
@@ -396,17 +404,14 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1); intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) { switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS: case INTEL_DVO_CHIP_TMDS:
-		intel_encoder->clone_mask =
-			(1 << INTEL_DVO_TMDS_CLONE_BIT) |
-			(1 << INTEL_ANALOG_CLONE_BIT);
+		intel_encoder->cloneable = true;
drm_connector_init(dev, connector, drm_connector_init(dev, connector,
&intel_dvo_connector_funcs, &intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII); DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS; encoder_type = DRM_MODE_ENCODER_TMDS;
break; break;
case INTEL_DVO_CHIP_LVDS: case INTEL_DVO_CHIP_LVDS:
-		intel_encoder->clone_mask =
-			(1 << INTEL_DVO_LVDS_CLONE_BIT);
+		intel_encoder->cloneable = false;
drm_connector_init(dev, connector, drm_connector_init(dev, connector,
&intel_dvo_connector_funcs, &intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS); DRM_MODE_CONNECTOR_LVDS);
......
@@ -889,7 +889,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_broadcast_rgb_property(connector); intel_attach_broadcast_rgb_property(connector);
} }
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector; struct drm_connector *connector;
@@ -923,48 +923,25 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
connector->doublescan_allowed = 0; connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-	/* Set up the DDC bus. */
-	if (sdvox_reg == SDVOB) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
-		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == SDVOC) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
-		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == HDMIB) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
-		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == HDMIC) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
-		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == HDMID) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
-		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
-		DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
-		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
-		intel_hdmi->ddi_port = PORT_B;
-		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
-		DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
-		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
-		intel_hdmi->ddi_port = PORT_C;
-		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
-		DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
-		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
-		intel_hdmi->ddi_port = PORT_D;
-		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
-	} else {
-		/* If we got an unknown sdvox_reg, things are pretty much broken
-		 * in a way that we should let the kernel know about it */
+	intel_encoder->cloneable = false;
+
+	intel_hdmi->ddi_port = port;
+	switch (port) {
+	case PORT_B:
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+		break;
+	case PORT_C:
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+		break;
+	case PORT_D:
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+		break;
+	case PORT_A:
+		/* Internal port only for eDP. */
+	default:
 		BUG();
 	}
@@ -967,7 +967,7 @@ bool intel_lvds_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, intel_encoder); intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS; intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); intel_encoder->cloneable = false;
if (HAS_PCH_SPLIT(dev)) if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
else if (IS_GEN4(dev)) else if (IS_GEN4(dev))
@@ -218,11 +218,6 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
u32 scratch_addr = pc->gtt_offset + 128; u32 scratch_addr = pc->gtt_offset + 128;
int ret; int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
ret = intel_emit_post_sync_nonzero_flush(ring);
if (ret)
return ret;
/* Just flush everything. Experiments have shown that reducing the /* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance * number of bits based on the write domains has little performance
* impact. * impact.
@@ -262,6 +257,20 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
return 0; return 0;
} }
static int
gen6_render_ring_flush__wa(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
ret = intel_emit_post_sync_nonzero_flush(ring);
if (ret)
return ret;
return gen6_render_ring_flush(ring, invalidate_domains, flush_domains);
}
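Splitting gen6_render_ring_flush__wa() out keeps the costly post-sync workaround off the common path: only the hardware that needs it gets the wrapper installed as its flush hook, everything else calls the plain flush directly. A compilable sketch of that wrapper-as-vfunc pattern:

#include <stdio.h>

static int common_flush(void)
{
	puts("flush");
	return 0;
}

/* Workaround prelude, then delegate to the shared implementation. */
static int flush_with_workaround(void)
{
	puts("post-sync nonzero workaround");
	return common_flush();
}

struct ring { int (*flush)(void); };

int main(void)
{
	struct ring needs_wa = { .flush = flush_with_workaround };
	struct ring plain    = { .flush = common_flush };

	needs_wa.flush();
	plain.flush();
	return 0;
}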
static void ring_write_tail(struct intel_ring_buffer *ring, static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value) u32 value)
{ {
@@ -462,7 +471,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen >= 6) if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (IS_IVYBRIDGE(dev)) if (HAS_L3_GPU_CACHE(dev))
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
return ret; return ret;
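The IS_IVYBRIDGE() check becomes a feature macro, so a future platform with an L3 GPU cache needs only a one-line change. The definition itself is not part of this excerpt; a minimal sketch, assuming it sits with the other HAS_* feature macros in i915_drv.h:

/* Sketch only: at this point Ivybridge is the only platform with an
 * L3 GPU cache, so the macro presumably reduces to this. */
#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev))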
@@ -628,26 +637,24 @@ pc_render_add_request(struct intel_ring_buffer *ring,
 }
 
 static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
-	struct drm_device *dev = ring->dev;
-
 	/* Workaround to force correct ordering between irq and seqno writes on
 	 * ivb (and maybe also on snb) by reading from a CS register (like
 	 * ACTHD) before reading the status page. */
-	if (IS_GEN6(dev) || IS_GEN7(dev))
+	if (!lazy_coherency)
 		intel_ring_get_active_head(ring);
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static u32
-ring_get_seqno(struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring)
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
 	struct pipe_control *pc = ring->private;
 	return pc->cpu_page[0];
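The new bool lets each caller decide whether the last-seen seqno is good enough, which is how the costly "missed IRQ" workaround is avoided on paths that can tolerate staleness. A hedged sketch of the two call styles (both helper names are illustrative and not part of this diff; i915_seqno_passed() is the driver's existing wrap-safe comparison):

/* Illustrative only: an opportunistic poll tolerates a stale value
 * and so skips the costly CS-register kick. */
static bool poll_request_done(struct intel_ring_buffer *ring, u32 seqno)
{
    return i915_seqno_passed(ring->get_seqno(ring, true), seqno);
}

/* A blocking wait, by contrast, forces a coherent status-page read. */
static bool wait_request_done(struct intel_ring_buffer *ring, u32 seqno)
{
    return i915_seqno_passed(ring->get_seqno(ring, false), seqno);
}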
@@ -852,7 +859,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
 					       GEN6_RENDER_L3_PARITY_ERROR));
 		else
@@ -875,7 +882,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
 		else
 			I915_WRITE_IMR(ring, ~0);
@@ -1010,7 +1017,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 	ring->size = 32 * PAGE_SIZE;
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1380,6 +1386,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen6_render_ring_flush;
+		if (INTEL_INFO(dev)->gen == 6)
+			ring->flush = gen6_render_ring_flush__wa;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT;
@@ -1481,7 +1489,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 	ring->size = size;
 	ring->effective_size = ring->size;
@@ -1574,3 +1581,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	return intel_init_ring_buffer(dev, ring);
 }
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	if (!ring->gpu_caches_dirty)
+		return 0;
+
+	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+	uint32_t flush_domains;
+	int ret;
+
+	flush_domains = 0;
+	if (ring->gpu_caches_dirty)
+		flush_domains = I915_GEM_GPU_DOMAINS;
+
+	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
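With the flushing_list gone, GPU cache state collapses into the single gpu_caches_dirty flag plus these two helpers. A sketch of the intended calling pattern around batch submission (the wrapper below is hypothetical; the real call sites live in the execbuffer and request paths, which are not part of this excerpt):

/* Hypothetical wrapper to illustrate the pattern: invalidate stale GPU
 * caches before the batch runs (flushing any dirty ones in the same
 * operation), then mark the caches dirty again, since the batch may
 * have written through them. */
static int submit_batch(struct intel_ring_buffer *ring, u32 offset, u32 len)
{
    int ret;

    ret = intel_ring_invalidate_all_caches(ring);
    if (ret)
        return ret;

    ret = ring->dispatch_execbuffer(ring, offset, len);
    if (ret)
        return ret;

    ring->gpu_caches_dirty = true;
    return 0;
}

Retirement and idling paths can then call intel_ring_flush_all_caches() unconditionally; it is a no-op while nothing is dirty.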
@@ -72,7 +72,14 @@ struct intel_ring_buffer {
 				  u32	flush_domains);
 	int		(*add_request)(struct intel_ring_buffer *ring,
 				       u32 *seqno);
-	u32		(*get_seqno)(struct intel_ring_buffer *ring);
+	/* Some chipsets are not quite as coherent as advertised and need
+	 * an expensive kick to force a true read of the up-to-date seqno.
+	 * However, the up-to-date seqno is not always required and the last
+	 * seen value is good enough. Note that the seqno will always be
+	 * monotonic, even if not coherent.
+	 */
+	u32		(*get_seqno)(struct intel_ring_buffer *ring,
+				     bool lazy_coherency);
 	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
 					       u32 offset, u32 length);
 	void		(*cleanup)(struct intel_ring_buffer *ring);
@@ -100,15 +107,6 @@ struct intel_ring_buffer {
 	 */
 	struct list_head request_list;
 
-	/**
-	 * List of objects currently pending a GPU write flush.
-	 *
-	 * All elements on this list will belong to either the
-	 * active_list or flushing_list, last_rendering_seqno can
-	 * be used to differentiate between the two elements.
-	 */
-	struct list_head gpu_write_list;
-
 	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
@@ -204,6 +202,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 void intel_ring_advance(struct intel_ring_buffer *ring);
 
 u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -2081,8 +2081,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
 		intel_sdvo->is_hdmi = true;
 	}
-	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				       (1 << INTEL_ANALOG_CLONE_BIT));
+	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (intel_sdvo->is_hdmi)
@@ -2113,7 +2112,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 	intel_sdvo->is_tv = true;
 	intel_sdvo->base.needs_tv_clock = true;
-	intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+	intel_sdvo->base.cloneable = false;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
@@ -2156,8 +2155,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
 	}
 
-	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				       (1 << INTEL_ANALOG_CLONE_BIT));
+	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector,
 				  intel_sdvo);
@@ -2189,8 +2187,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
 	}
 
-	intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
-				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+	/* SDVO LVDS is cloneable because the SDVO encoder does the upscaling,
+	 * as opposed to native LVDS, where we upscale with the panel-fitter
+	 * (and hence only the native LVDS resolution could be cloned). */
+	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
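Collapsing the per-encoder clone_mask bits into a single bool reduces clone compatibility to a pair-wise test. A minimal sketch of what such a check can look like (the helper below is illustrative; the actual check lives in the modeset code, which is not part of this excerpt):

/* Illustrative: two distinct encoders may share a crtc only if both
 * opted in via ->cloneable. */
static bool encoders_cloneable(const struct intel_encoder *a,
                               const struct intel_encoder *b)
{
    return a == b || (a->cloneable && b->cloneable);
}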
@@ -1622,7 +1622,7 @@ intel_tv_init(struct drm_device *dev)
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
-	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+	intel_encoder->cloneable = false;
 	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
 	intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
 	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
@@ -203,6 +203,9 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_WAIT	0x2c
 #define DRM_I915_GEM_CONTEXT_CREATE	0x2d
 #define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
+#define DRM_I915_GEM_SET_CACHEING	0x2f
+#define DRM_I915_GEM_GET_CACHEING	0x30
+#define DRM_I915_REG_READ	0x31
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -227,6 +230,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
 #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_SET_CACHEING	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHEING, struct drm_i915_gem_cacheing)
+#define DRM_IOCTL_I915_GEM_GET_CACHEING	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHEING, struct drm_i915_gem_cacheing)
 #define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
 #define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
 #define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
@@ -249,6 +254,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+#define DRM_IOCTL_I915_REG_READ		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -305,6 +311,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_LLC		17
 #define I915_PARAM_HAS_ALIASING_PPGTT	18
 #define I915_PARAM_HAS_WAIT_TIMEOUT	19
+#define I915_PARAM_HAS_SEMAPHORES	20
 
 typedef struct drm_i915_getparam {
 	int param;
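Userspace discovers the new parameter through the usual GETPARAM dance; a kernel that predates it simply fails the ioctl. A sketch, assuming libdrm-style usage (the helper name is made up):

#include <xf86drm.h>
#include <i915_drm.h>

/* Returns non-zero if the kernel uses inter-ring semaphores. */
static int i915_has_semaphores(int fd)
{
    drm_i915_getparam_t gp;
    int value = 0;

    gp.param = I915_PARAM_HAS_SEMAPHORES;
    gp.value = &value;
    if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
        return 0; /* older kernel: parameter unknown */
    return value;
}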
@@ -698,10 +705,31 @@ struct drm_i915_gem_busy {
 	/** Handle of the buffer to check for busy */
 	__u32 handle;
 
-	/** Return busy status (1 if busy, 0 if idle) */
+	/** Return busy status (1 if busy, 0 if idle).
+	 * The high word is used to indicate on which rings the object
+	 * currently resides:
+	 *  16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+	 */
 	__u32 busy;
 };
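Bit 0 keeps its old meaning, so existing userspace continues to work; new userspace can additionally inspect the high word. A small decoding sketch (the helper is illustrative, using only the bit layout documented above):

#include <i915_drm.h>

/* Illustrative decode of the extended busy word: returns a mask of
 * the rings the object is active on (bit 0 = render, 1 = bsd,
 * 2 = blt, ...), or 0 if it is idle. */
static unsigned int busy_ring_mask(__u32 busy)
{
    if (!(busy & 1))
        return 0;       /* idle everywhere */
    return busy >> 16;  /* bits 16+ shifted down to a ring mask */
}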
+#define I915_CACHEING_NONE		0
+#define I915_CACHEING_CACHED		1
+
+struct drm_i915_gem_cacheing {
+	/**
+	 * Handle of the buffer to set/get the cacheing level of. */
+	__u32 handle;
+
+	/**
+	 * Cacheing level to apply or return value
+	 *
+	 * bits0-15 are for generic cacheing control (i.e. the above defined
+	 * values). bits16-31 are reserved for platform-specific variations
+	 * (e.g. l3$ caching on gen7). */
+	__u32 cacheing;
+};
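A hedged userspace sketch of flipping a buffer to LLC-cached and reading the level back (fd and the GEM handle are assumed to exist; note the uapi spells it "cacheing" throughout):

#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Illustrative: request a cacheable mapping for a bo and read the
 * resulting level back. Returns the level, or -1 on error. */
static int bo_set_cacheing(int fd, __u32 handle)
{
    struct drm_i915_gem_cacheing arg;

    memset(&arg, 0, sizeof(arg));
    arg.handle = handle;
    arg.cacheing = I915_CACHEING_CACHED;
    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHEING, &arg))
        return -1;

    memset(&arg, 0, sizeof(arg));
    arg.handle = handle;
    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHEING, &arg))
        return -1;
    return arg.cacheing;
}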
 #define I915_TILING_NONE	0
 #define I915_TILING_X		1
 #define I915_TILING_Y		2
@@ -918,4 +946,8 @@ struct drm_i915_gem_context_destroy {
 	__u32 pad;
 };
+struct drm_i915_reg_read {
+	__u64 offset;
+	__u64 val; /* Return value */
+};
 #endif /* _I915_DRM_H_ */
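Using the new read_reg ioctl from userspace is a matter of filling in offset and letting the kernel return val; the kernel only accepts a whitelist of offsets, so treat the offset below as a placeholder. A sketch:

#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Illustrative: read one register. 0x2358 (the render ring timestamp)
 * is an assumption about the whitelist, not a given. */
static void dump_reg(int fd)
{
    struct drm_i915_reg_read rr = {
        .offset = 0x2358,
        .val = 0,
    };

    if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
        printf("reg %#llx = %#llx\n",
               (unsigned long long)rr.offset,
               (unsigned long long)rr.val);
}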