Commit 269b62db authored by Dave Airlie

Merge branch 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:

" First -next pull for 3.7. Highlights:
- hsw hdmi improvements (Paulo)
- ips/rps locking rework and cleanups
- rc6 on ilk by default again
- hw context&dp&dpff support for hsw (Ben)
- GET_PARAM_HAS_SEMAPHORES (Chris)
- gen6+ pipe_control improvements (Chris)
- set_cacheing ioctl and assorted support code (Chris)
- cleanups around the busy/idle/pm code (Chris&me)
- flushing_list removal, hopefully for good (Chris)
- read_reg ioctl (Ben)
- support the ns2501 dvo (Thomas Richter)
- avoid the costly gen6+ "missed IRQ" workaround where we don't need a
  race-free seqno readback (Chris)
- various bits&pieces, mostly early patches from the modeset rework branch"

* 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel: (54 commits)
  drm/i915: don't grab dev->struct_mutex for userspace forcewake
  drm/i915: try harder to find WR PLL clock settings
  drm/i915: use the correct encoder type when comparing
  drm/i915: Lazily apply the SNB+ seqno w/a
  drm/i915: enable rc6 on ilk again
  drm/i915: fix up ilk drps/ips locking
  drm/i915: DE_PCU_EVENT irq is ilk-only
  drm/i915: kill dev_priv->mchdev_lock
  drm/i915: move all rps state into dev_priv->rps
  drm/i915: use mutex_lock_interruptible for debugfs files
  drm/i915: fixup up debugfs rps state handling
  drm/i915: properly guard ilk ips state
  drm/i915: add parentheses around PIXCLK_GATE definitions
  drm/i915: reindent Haswell register definitions
  drm/i915: completely reset the value of DDI_FUNC_CTL
  drm/i915: correctly set the DDI_FUNC_CTL bpc field
  drm/i915: set the DDI sync polarity bits
  drm/i915: fix pipe DDI mode select
  drm/i915: dump the device info
  drm/i915: fixup desired rps frequency computation
  ...
parents d9875690 a22ddff8
......@@ -40,6 +40,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
dvo_ivch.o \
dvo_tfp410.o \
dvo_sil164.o \
dvo_ns2501.o \
i915_gem_dmabuf.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
......
......@@ -140,5 +140,6 @@ extern struct intel_dvo_dev_ops ch7xxx_ops;
extern struct intel_dvo_dev_ops ivch_ops;
extern struct intel_dvo_dev_ops tfp410_ops;
extern struct intel_dvo_dev_ops ch7017_ops;
extern struct intel_dvo_dev_ops ns2501_ops;
#endif /* _INTEL_DVO_H */
......@@ -44,7 +44,6 @@
enum {
ACTIVE_LIST,
FLUSHING_LIST,
INACTIVE_LIST,
PINNED_LIST,
};
......@@ -62,28 +61,11 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "gen: %d\n", info->gen);
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
B(is_mobile);
B(is_i85x);
B(is_i915g);
B(is_i945gm);
B(is_g33);
B(need_gfx_hws);
B(is_g4x);
B(is_pineview);
B(is_broadwater);
B(is_crestline);
B(has_fbc);
B(has_pipe_cxsr);
B(has_hotplug);
B(cursor_needs_physical);
B(has_overlay);
B(overlay_needs_physical);
B(supports_tv);
B(has_bsd_ring);
B(has_blt_ring);
B(has_llc);
#undef B
#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define DEV_INFO_SEP ;
DEV_INFO_FLAGS;
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP
return 0;
}
......@@ -121,14 +103,15 @@ static const char *cache_level_str(int type)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
obj->last_rendering_seqno,
obj->last_read_seqno,
obj->last_write_seqno,
obj->last_fenced_seqno,
cache_level_str(obj->cache_level),
obj->dirty ? " dirty" : "",
......@@ -177,10 +160,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
case FLUSHING_LIST:
seq_printf(m, "Flushing:\n");
head = &dev_priv->mm.flushing_list;
break;
default:
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
......@@ -238,7 +217,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.active_list, mm_list);
count_objects(&dev_priv->mm.flushing_list, mm_list);
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
......@@ -413,7 +391,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
{
if (ring->get_seqno) {
seq_printf(m, "Current sequence (%s): %d\n",
ring->name, ring->get_seqno(ring));
ring->name, ring->get_seqno(ring, false));
}
}
......@@ -630,12 +608,12 @@ static void print_error_buffers(struct seq_file *m,
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
err->write_domain,
err->seqno,
err->rseqno, err->wseqno,
pin_flag(err->pinned),
tiling_flag(err->tiling),
dirty_flag(err->dirty),
......@@ -799,10 +777,14 @@ i915_error_state_write(struct file *filp,
struct seq_file *m = filp->private_data;
struct i915_error_state_file_priv *error_priv = m->private;
struct drm_device *dev = error_priv->dev;
int ret;
DRM_DEBUG_DRIVER("Resetting error state\n");
mutex_lock(&dev->struct_mutex);
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
i915_destroy_error_state(dev);
mutex_unlock(&dev->struct_mutex);
......@@ -1292,7 +1274,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
for (gpu_freq = dev_priv->rps.min_delay;
gpu_freq <= dev_priv->rps.max_delay;
gpu_freq++) {
I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
......@@ -1472,8 +1455,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
mutex_lock(&dev->struct_mutex);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
......@@ -1674,7 +1661,7 @@ i915_ring_stop_write(struct file *filp,
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
int val = 0;
int val = 0, ret;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
......@@ -1689,7 +1676,10 @@ i915_ring_stop_write(struct file *filp,
DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
mutex_lock(&dev->struct_mutex);
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
dev_priv->stop_rings = val;
mutex_unlock(&dev->struct_mutex);
......@@ -1713,10 +1703,18 @@ i915_max_freq_read(struct file *filp,
struct drm_device *dev = filp->private_data;
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
int len;
int len, ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"max freq: %d\n", dev_priv->max_delay * 50);
"max freq: %d\n", dev_priv->rps.max_delay * 50);
mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
len = sizeof(buf);
......@@ -1733,7 +1731,10 @@ i915_max_freq_write(struct file *filp,
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
int val = 1;
int val = 1, ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
......@@ -1748,12 +1749,17 @@ i915_max_freq_write(struct file *filp,
DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
/*
* Turbo will still be enabled, but won't go above the set value.
*/
dev_priv->max_delay = val / 50;
dev_priv->rps.max_delay = val / 50;
gen6_set_rps(dev, val / 50);
mutex_unlock(&dev->struct_mutex);
return cnt;
}
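(For reference: the rps delay fields are stored in 50 MHz units, so writing 900 to i915_max_freq stores dev_priv->rps.max_delay = 900 / 50 = 18 and immediately calls gen6_set_rps(dev, 18); the same conversion applies to i915_min_freq below.)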
......@@ -1773,10 +1779,18 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
struct drm_device *dev = filp->private_data;
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
int len;
int len, ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"min freq: %d\n", dev_priv->min_delay * 50);
"min freq: %d\n", dev_priv->rps.min_delay * 50);
mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
len = sizeof(buf);
......@@ -1791,7 +1805,10 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
int val = 1;
int val = 1, ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
......@@ -1806,12 +1823,17 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
/*
* Turbo will still be enabled, but won't go below the set value.
*/
dev_priv->min_delay = val / 50;
dev_priv->rps.min_delay = val / 50;
gen6_set_rps(dev, val / 50);
mutex_unlock(&dev->struct_mutex);
return cnt;
}
......@@ -1834,9 +1856,15 @@ i915_cache_sharing_read(struct file *filp,
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
u32 snpcr;
int len;
int len, ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
mutex_lock(&dev_priv->dev->struct_mutex);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
mutex_unlock(&dev_priv->dev->struct_mutex);
......@@ -1862,6 +1890,9 @@ i915_cache_sharing_write(struct file *filp,
u32 snpcr;
int val = 1;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
return -EINVAL;
......@@ -1925,16 +1956,11 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (INTEL_INFO(dev)->gen < 6)
return 0;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
gen6_gt_force_wake_get(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
}
......@@ -1947,16 +1973,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
/*
* It's bad that we can potentially hang userspace if struct_mutex gets
* forever stuck. However, if we cannot acquire this lock it means that
* almost certainly the driver has hung and is not unloadable. Therefore
* hanging here is probably a minor inconvenience not to be seen by
* almost every user.
*/
mutex_lock(&dev->struct_mutex);
gen6_gt_force_wake_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
}
......@@ -2006,7 +2023,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
......@@ -2067,6 +2083,7 @@ int i915_debugfs_init(struct drm_minor *minor)
&i915_cache_sharing_fops);
if (ret)
return ret;
ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_ring_stop",
&i915_ring_stop_fops);
......
......@@ -1009,6 +1009,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_WAIT_TIMEOUT:
value = 1;
break;
case I915_PARAM_HAS_SEMAPHORES:
value = i915_semaphore_is_enabled(dev);
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
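For context on the new GETPARAM case: userspace can probe semaphore support through the standard GETPARAM path. A minimal sketch follows (illustrative only; it assumes libdrm include paths and an i915_drm.h that already carries the I915_PARAM_HAS_SEMAPHORES define added by this series):

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int has_semaphores = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_SEMAPHORES,
		.value = &has_semaphores,
	};

	if (fd < 0)
		return 1;
	/* Older kernels hit the default case above and return -EINVAL. */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("semaphores enabled: %d\n", has_semaphores);
	return 0;
}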
......@@ -1425,6 +1428,21 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
kfree(ap);
}
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = dev_priv->info;
#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
#define DEV_INFO_SEP ,
DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
info->gen,
dev_priv->dev->pdev->device,
DEV_INFO_FLAGS);
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP
}
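i915_dump_device_info() leans on the DEV_INFO_FLAGS X-macro (defined in i915_drv.h later in this diff): the flag list is written once, and each user supplies its own DEV_INFO_FLAG/DEV_INFO_SEP expansion, either one seq_printf() per flag in debugfs or, here, one string argument per flag for a single DRM_DEBUG_DRIVER call. A stripped-down, standalone illustration of the pattern (made-up flag names, not the driver's):

#include <stdio.h>

#define FLAGS \
	FLAG(is_mobile) SEP \
	FLAG(has_llc)

struct info { int is_mobile, has_llc; };

static void dump(const struct info *info)
{
#define FLAG(name) info->name ? #name "," : ""
#define SEP ,
	/* Expands to one "%s" argument per flag listed in FLAGS. */
	printf("flags=%s%s\n", FLAGS);
#undef FLAG
#undef SEP
}

int main(void)
{
	struct info i = { .is_mobile = 1, .has_llc = 0 };
	dump(&i);	/* prints: flags=is_mobile, */
	return 0;
}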
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
......@@ -1449,7 +1467,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
......@@ -1465,6 +1482,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = info;
i915_dump_device_info(dev_priv);
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
......@@ -1586,7 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
spin_lock_init(&dev_priv->rps.lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
......@@ -1835,6 +1854,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHEING, i915_gem_set_cacheing_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHEING, i915_gem_get_cacheing_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
......@@ -1857,6 +1878,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
......
......@@ -1060,7 +1060,7 @@ static bool IS_DISPLAYREG(u32 reg)
* This should make it easier to transition modules over to the
* new register block scheme, since we can do it incrementally.
*/
if (reg >= 0x180000)
if (reg >= VLV_DISPLAY_BASE)
return false;
if (reg >= RENDER_RING_BASE &&
......@@ -1180,3 +1180,49 @@ __i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
static const struct register_whitelist {
uint64_t offset;
uint32_t size;
uint32_t gen_bitmask; /* supported gens: 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
int i915_reg_read_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
int i;
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (entry->offset == reg->offset &&
(1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
break;
}
if (i == ARRAY_SIZE(whitelist))
return -EINVAL;
switch (entry->size) {
case 8:
reg->val = I915_READ64(reg->offset);
break;
case 4:
reg->val = I915_READ(reg->offset);
break;
case 2:
reg->val = I915_READ16(reg->offset);
break;
case 1:
reg->val = I915_READ8(reg->offset);
break;
default:
WARN_ON(1);
return -EINVAL;
}
return 0;
}
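The matching userspace side goes through DRM_IOCTL_I915_REG_READ with struct drm_i915_reg_read from the updated i915_drm.h; only whitelisted offsets pass, and the 0xF0 gen_bitmask above covers gens 4-7. A hedged sketch (the 0x2358 offset for RING_TIMESTAMP(RENDER_RING_BASE) is an assumption, check i915_reg.h):

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	struct drm_i915_reg_read rr = {
		.offset = 0x2358,	/* RING_TIMESTAMP(RENDER_RING_BASE), assumed */
	};
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;
	/* Non-whitelisted offsets (or unsupported gens) return -EINVAL. */
	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
		printf("render ring timestamp: 0x%llx\n",
		       (unsigned long long)rr.val);
	return 0;
}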
......@@ -109,6 +109,7 @@ struct intel_pch_pll {
#define WATCH_COHERENCY 0
#define WATCH_LISTS 0
#define WATCH_GTT 0
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
......@@ -221,7 +222,7 @@ struct drm_i915_error_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
u32 seqno;
u32 rseqno, wseqno;
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
......@@ -248,7 +249,6 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
void (*sanitize_pm)(struct drm_device *dev);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
int (*crtc_mode_set)(struct drm_crtc *crtc,
......@@ -279,6 +279,32 @@ struct drm_i915_gt_funcs {
void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
#define DEV_INFO_FLAGS \
DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
DEV_INFO_FLAG(has_llc)
struct intel_device_info {
u8 gen;
u8 is_mobile:1;
......@@ -695,17 +721,6 @@ typedef struct drm_i915_private {
*/
struct list_head active_list;
/**
* List of objects which are not in the ringbuffer but which
* still have a write_domain which needs to be flushed before
* unbinding.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is held on the buffer while on this list.
*/
struct list_head flushing_list;
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
......@@ -796,9 +811,6 @@ typedef struct drm_i915_private {
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
struct work_struct idle_work;
struct timer_list idle_timer;
bool busy;
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
......@@ -807,9 +819,21 @@ typedef struct drm_i915_private {
bool mchbar_need_disable;
struct work_struct rps_work;
spinlock_t rps_lock;
u32 pm_iir;
/* gen6+ rps state */
struct {
struct work_struct work;
u32 pm_iir;
/* lock - irqsave spinlock that protects the work_struct and
* pm_iir. */
spinlock_t lock;
/* The below variables and all the rps hw state are protected by
* dev->struct_mutex. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
} rps;
u8 cur_delay;
u8 min_delay;
......@@ -826,7 +850,6 @@ typedef struct drm_i915_private {
int c_m;
int r_t;
u8 corr;
spinlock_t *mchdev_lock;
enum no_fbc_reason no_fbc_reason;
......@@ -861,9 +884,9 @@ enum hdmi_force_audio {
};
enum i915_cache_level {
I915_CACHE_NONE,
I915_CACHE_NONE = 0,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+ */
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};
struct drm_i915_gem_object {
......@@ -873,18 +896,16 @@ struct drm_i915_gem_object {
struct drm_mm_node *gtt_space;
struct list_head gtt_list;
/** This object's place on the active/flushing/inactive lists */
/** This object's place on the active/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
/**
* This is set if the object is on the active or flushing lists
* (has pending rendering), and is not set if it's on inactive (ready
* to be unbound).
* This is set if the object is on the active lists (has pending
* rendering and so a non-zero seqno), and is not set if it is on the
* inactive (ready to be unbound) list.
*/
unsigned int active:1;
......@@ -894,12 +915,6 @@ struct drm_i915_gem_object {
*/
unsigned int dirty:1;
/**
* This is set if the object has been written to since the last
* GPU flush.
*/
unsigned int pending_gpu_write:1;
/**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
......@@ -992,7 +1007,8 @@ struct drm_i915_gem_object {
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
uint32_t last_read_seqno;
uint32_t last_write_seqno;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
......@@ -1135,6 +1151,8 @@ struct drm_i915_file_private {
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#include "i915_trace.h"
/**
......@@ -1256,6 +1274,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
......@@ -1274,9 +1296,6 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
......@@ -1291,7 +1310,6 @@ void i915_gem_lastclose(struct drm_device *dev);
int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask);
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
......@@ -1358,9 +1376,9 @@ void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
......@@ -1429,7 +1447,9 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable);
unsigned alignment,
unsigned cache_level,
bool mappable);
int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
/* i915_gem_stolen.c */
......@@ -1529,6 +1549,8 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* overlay */
#ifdef CONFIG_DEBUG_FS
......
......@@ -97,8 +97,7 @@
static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
static int do_switch(struct drm_i915_gem_object *from_obj,
struct i915_hw_context *to, u32 seqno);
static int do_switch(struct i915_hw_context *to);
static int get_context_size(struct drm_device *dev)
{
......@@ -113,7 +112,10 @@ static int get_context_size(struct drm_device *dev)
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
if (IS_HASWELL(dev))
ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
default:
BUG();
......@@ -220,19 +222,20 @@ static int create_default_context(struct drm_i915_private *dev_priv)
*/
dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
if (ret) {
do_destroy(ctx);
return ret;
}
if (ret)
goto err_destroy;
ret = do_switch(NULL, ctx, 0);
if (ret) {
i915_gem_object_unpin(ctx->obj);
do_destroy(ctx);
} else {
DRM_DEBUG_DRIVER("Default HW context loaded\n");
}
ret = do_switch(ctx);
if (ret)
goto err_unpin;
DRM_DEBUG_DRIVER("Default HW context loaded\n");
return 0;
err_unpin:
i915_gem_object_unpin(ctx->obj);
err_destroy:
do_destroy(ctx);
return ret;
}
......@@ -359,17 +362,18 @@ mi_set_context(struct intel_ring_buffer *ring,
return ret;
}
static int do_switch(struct drm_i915_gem_object *from_obj,
struct i915_hw_context *to,
u32 seqno)
static int do_switch(struct i915_hw_context *to)
{
struct intel_ring_buffer *ring = NULL;
struct intel_ring_buffer *ring = to->ring;
struct drm_i915_gem_object *from_obj = ring->last_context_obj;
u32 hw_flags = 0;
int ret;
BUG_ON(to == NULL);
BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
if (from_obj == to->obj)
return 0;
ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
if (ret)
return ret;
......@@ -393,7 +397,6 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;
ring = to->ring;
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
i915_gem_object_unpin(to->obj);
......@@ -407,6 +410,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring, seqno);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
......@@ -417,7 +421,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
* swapped, but there is no way to do that yet.
*/
from_obj->dirty = 1;
BUG_ON(from_obj->ring != to->ring);
BUG_ON(from_obj->ring != ring);
i915_gem_object_unpin(from_obj);
drm_gem_object_unreference(&from_obj->base);
......@@ -448,9 +452,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
int to_id)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_file_private *file_priv = NULL;
struct i915_hw_context *to;
struct drm_i915_gem_object *from_obj = ring->last_context_obj;
if (dev_priv->hw_contexts_disabled)
return 0;
......@@ -458,21 +460,18 @@ int i915_switch_context(struct intel_ring_buffer *ring,
if (ring != &dev_priv->ring[RCS])
return 0;
if (file)
file_priv = file->driver_priv;
if (to_id == DEFAULT_CONTEXT_ID) {
to = ring->default_context;
} else {
to = i915_gem_context_get(file_priv, to_id);
if (file == NULL)
return -EINVAL;
to = i915_gem_context_get(file->driver_priv, to_id);
if (to == NULL)
return -ENOENT;
}
if (from_obj == to->obj)
return 0;
return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
return do_switch(to);
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
......
......@@ -44,7 +44,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable)
unsigned alignment, unsigned cache_level,
bool mappable)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
......@@ -79,11 +80,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
min_size, alignment, 0,
min_size, alignment, cache_level,
0, dev_priv->mm.gtt_mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space,
min_size, alignment, 0);
min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
......@@ -93,23 +94,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
if (obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
goto found;
}
/* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (mark_free(obj, &unwind_list))
goto found;
}
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (!obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
goto found;
}
......@@ -172,7 +156,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
int ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return -ENOSPC;
......@@ -189,8 +172,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
i915_gem_retire_requests(dev);
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) {
......
......@@ -34,180 +34,6 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
uint32_t flush_domains;
uint32_t flush_rings;
uint32_t flips;
};
/*
* Set the next domain for the specified object. This
* may not actually perform the necessary flushing/invaliding though,
* as that may want to be batched with other set_domain operations
*
* This is (we hope) the only really tricky part of gem. The goal
* is fairly simple -- track which caches hold bits of the object
* and make sure they remain coherent. A few concrete examples may
* help to explain how it works. For shorthand, we use the notation
* (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
* a pair of read and write domain masks.
*
* Case 1: the batch buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Mapped to GTT
* 4. Read by GPU
* 5. Unmapped from GTT
* 6. Freed
*
* Let's take these a step at a time
*
* 1. Allocated
* Pages allocated from the kernel may still have
* cache contents, so we set them to (CPU, CPU) always.
* 2. Written by CPU (using pwrite)
* The pwrite function calls set_domain (CPU, CPU) and
* this function does nothing (as nothing changes)
* 3. Mapped by GTT
* This function asserts that the object is not
* currently in any GPU-based read or write domains
* 4. Read by GPU
* i915_gem_execbuffer calls set_domain (COMMAND, 0).
* As write_domain is zero, this function adds in the
* current read domains (CPU+COMMAND, 0).
* flush_domains is set to CPU.
* invalidate_domains is set to COMMAND
* clflush is run to get data out of the CPU caches
* then i915_dev_set_domain calls i915_gem_flush to
* emit an MI_FLUSH and drm_agp_chipset_flush
* 5. Unmapped from GTT
* i915_gem_object_unbind calls set_domain (CPU, CPU)
* flush_domains and invalidate_domains end up both zero
* so no flushing/invalidating happens
* 6. Freed
* yay, done
*
* Case 2: The shared render buffer
*
* 1. Allocated
* 2. Mapped to GTT
* 3. Read/written by GPU
* 4. set_domain to (CPU,CPU)
* 5. Read/written by CPU
* 6. Read/written by GPU
*
* 1. Allocated
* Same as last example, (CPU, CPU)
* 2. Mapped to GTT
* Nothing changes (assertions find that it is not in the GPU)
* 3. Read/written by GPU
* execbuffer calls set_domain (RENDER, RENDER)
* flush_domains gets CPU
* invalidate_domains gets GPU
* clflush (obj)
* MI_FLUSH and drm_agp_chipset_flush
* 4. set_domain (CPU, CPU)
* flush_domains gets GPU
* invalidate_domains gets CPU
* wait_rendering (obj) to make sure all drawing is complete.
* This will include an MI_FLUSH to get the data from GPU
* to memory
* clflush (obj) to invalidate the CPU cache
* Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
* 5. Read/written by CPU
* cache lines are loaded and dirtied
* 6. Read written by GPU
* Same as last GPU access
*
* Case 3: The constant buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Read by GPU
* 4. Updated (written) by CPU again
* 5. Read by GPU
*
* 1. Allocated
* (CPU, CPU)
* 2. Written by CPU
* (CPU, CPU)
* 3. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
* 4. Updated (written) by CPU again
* (CPU, CPU)
* flush_domains = 0 (no previous write domain)
* invalidate_domains = 0 (no new read domains)
* 5. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
struct change_domains *cd)
{
uint32_t invalidate_domains = 0, flush_domains = 0;
/*
* If the object isn't moving to a new write domain,
* let the object stay in multiple read domains
*/
if (obj->base.pending_write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
/*
* Flush the current write domain if
* the new read domains don't match. Invalidate
* any read domains which differ from the old
* write domain
*/
if (obj->base.write_domain &&
(((obj->base.write_domain != obj->base.pending_read_domains ||
obj->ring != ring)) ||
(obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
flush_domains |= obj->base.write_domain;
invalidate_domains |=
obj->base.pending_read_domains & ~obj->base.write_domain;
}
/*
* Invalidate any read caches which may have
* stale data. That is, any new read domains.
*/
invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
if (obj->base.pending_write_domain)
cd->flips |= atomic_read(&obj->pending_flip);
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
* write_domains). So if we have a current write domain that we
* aren't changing, set pending_write_domain to that.
*/
if (flush_domains == 0 && obj->base.pending_write_domain == 0)
obj->base.pending_write_domain = obj->base.write_domain;
cd->invalidate_domains |= invalidate_domains;
cd->flush_domains |= flush_domains;
if (flush_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= intel_ring_flag(obj->ring);
if (invalidate_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= intel_ring_flag(ring);
}
struct eb_objects {
int and;
struct hlist_head buckets[0];
......@@ -587,6 +413,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
list_splice(&ordered_objects, objects);
......@@ -810,18 +637,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
return ret;
}
static void
i915_gem_execbuffer_flush(struct drm_device *dev,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
}
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
......@@ -854,48 +669,45 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
return 0;
}
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
struct change_domains cd;
uint32_t flush_domains = 0;
uint32_t flips = 0;
int ret;
memset(&cd, 0, sizeof(cd));
list_for_each_entry(obj, objects, exec_list)
i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
if (cd.invalidate_domains | cd.flush_domains) {
i915_gem_execbuffer_flush(ring->dev,
cd.invalidate_domains,
cd.flush_domains);
}
if (cd.flips) {
ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
if (obj->base.pending_write_domain)
flips |= atomic_read(&obj->pending_flip);
flush_domains |= obj->base.write_domain;
}
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_object_sync(obj, ring);
if (flips) {
ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
if (ret)
return ret;
}
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
ret = i915_gem_flush_ring(ring,
I915_GEM_GPU_DOMAINS,
ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
if (ret)
return ret;
ring->gpu_caches_dirty = false;
return 0;
return intel_ring_invalidate_all_caches(ring);
}
static bool
......@@ -943,9 +755,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
......@@ -954,17 +765,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
i915_gem_object_move_to_active(obj, ring, seqno);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
obj->last_write_seqno = seqno;
if (obj->pin_count) /* check for potential scanout */
intel_mark_busy(ring->dev, obj);
intel_mark_fb_busy(obj);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
intel_mark_busy(ring->dev, NULL);
}
static void
......@@ -972,16 +779,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_request *request;
/* Unconditionally force add_request to emit a full flush. */
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL || i915_add_request(ring, file, request)) {
kfree(request);
}
(void)i915_add_request(ring, file, NULL);
}
static int
......
......@@ -423,6 +423,23 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
undo_idling(dev_priv, interruptible);
}
static void i915_gtt_color_adjust(struct drm_mm_node *node,
unsigned long color,
unsigned long *start,
unsigned long *end)
{
if (node->color != color)
*start += 4096;
if (!list_empty(&node->node_list)) {
node = list_entry(node->node_list.next,
struct drm_mm_node,
node_list);
if (node->allocated && node->color != color)
*end -= 4096;
}
}
void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
......@@ -432,6 +449,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
/* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
dev_priv->mm.gtt_start = start;
dev_priv->mm.gtt_mappable_end = mappable_end;
......
......@@ -296,11 +296,21 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev);
}
static void i915_handle_rps_change(struct drm_device *dev)
/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;
static void ironlake_handle_rps_change(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay = dev_priv->cur_delay;
u8 new_delay;
unsigned long flags;
spin_lock_irqsave(&mchdev_lock, flags);
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
new_delay = dev_priv->cur_delay;
I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
busy_up = I915_READ(RCPREVBSYTUPAVG);
......@@ -324,6 +334,8 @@ static void i915_handle_rps_change(struct drm_device *dev)
if (ironlake_set_drps(dev, new_delay))
dev_priv->cur_delay = new_delay;
spin_unlock_irqrestore(&mchdev_lock, flags);
return;
}
......@@ -335,7 +347,7 @@ static void notify_ring(struct drm_device *dev,
if (ring->obj == NULL)
return;
trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
......@@ -349,16 +361,16 @@ static void notify_ring(struct drm_device *dev,
static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps_work);
rps.work);
u32 pm_iir, pm_imr;
u8 new_delay;
spin_lock_irq(&dev_priv->rps_lock);
pm_iir = dev_priv->pm_iir;
dev_priv->pm_iir = 0;
spin_lock_irq(&dev_priv->rps.lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock);
spin_unlock_irq(&dev_priv->rps.lock);
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
......@@ -366,9 +378,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->dev->struct_mutex);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
new_delay = dev_priv->cur_delay + 1;
new_delay = dev_priv->rps.cur_delay + 1;
else
new_delay = dev_priv->cur_delay - 1;
new_delay = dev_priv->rps.cur_delay - 1;
gen6_set_rps(dev_priv->dev, new_delay);
......@@ -444,7 +456,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long flags;
if (!IS_IVYBRIDGE(dev))
if (!HAS_L3_GPU_CACHE(dev))
return;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
......@@ -488,19 +500,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared
* dev_priv->pm_iir. Although missing an interrupt of the same
* dev_priv->rps.pm_iir. Although missing an interrupt of the same
* type is not a problem, it displays a problem in the logic.
*
* The mask bit in IMR is cleared by rps_work.
* The mask bit in IMR is cleared by dev_priv->rps.work.
*/
spin_lock_irqsave(&dev_priv->rps_lock, flags);
dev_priv->pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
spin_lock_irqsave(&dev_priv->rps.lock, flags);
dev_priv->rps.pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
queue_work(dev_priv->wq, &dev_priv->rps_work);
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
......@@ -793,10 +805,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
ibx_irq_handler(dev, pch_iir);
}
if (de_iir & DE_PCU_EVENT) {
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
i915_handle_rps_change(dev);
}
if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
ironlake_handle_rps_change(dev);
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
......@@ -949,7 +959,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
{
err->size = obj->base.size;
err->name = obj->base.name;
err->seqno = obj->last_rendering_seqno;
err->rseqno = obj->last_read_seqno;
err->wseqno = obj->last_write_seqno;
err->gtt_offset = obj->gtt_offset;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
......@@ -1039,12 +1050,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (!ring->get_seqno)
return NULL;
seqno = ring->get_seqno(ring);
seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring)
continue;
if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
......@@ -1093,7 +1104,7 @@ static void i915_record_ring_state(struct drm_device *dev,
error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
error->seqno[ring->id] = ring->get_seqno(ring);
error->seqno[ring->id] = ring->get_seqno(ring, false);
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
......@@ -1590,7 +1601,8 @@ ring_last_seqno(struct intel_ring_buffer *ring)
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
if (list_empty(&ring->request_list) ||
i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
i915_seqno_passed(ring->get_seqno(ring, false),
ring_last_seqno(ring))) {
/* Issue a wake-up to catch stuck h/w. */
if (waitqueue_active(&ring->irq_queue)) {
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
......@@ -2647,7 +2659,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
......
......@@ -213,7 +213,7 @@ void i915_setup_sysfs(struct drm_device *dev)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
if (IS_IVYBRIDGE(dev)) {
if (HAS_L3_GPU_CACHE(dev)) {
ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
......
......@@ -47,6 +47,7 @@
struct intel_crt {
struct intel_encoder base;
bool force_hotplug_required;
u32 adpa_reg;
};
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
......@@ -55,6 +56,11 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
struct intel_crt, base);
}
static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_crt, base);
}
static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
......@@ -145,19 +151,15 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crt *crt =
intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_md_reg;
u32 adpa, dpll_md;
u32 adpa_reg;
dpll_md_reg = DPLL_MD(intel_crtc->pipe);
if (HAS_PCH_SPLIT(dev))
adpa_reg = PCH_ADPA;
else
adpa_reg = ADPA;
/*
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
......@@ -185,7 +187,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
I915_WRITE(adpa_reg, adpa);
I915_WRITE(crt->adpa_reg, adpa);
}
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
......@@ -658,9 +660,7 @@ void intel_crt_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
1 << INTEL_ANALOG_CLONE_BIT |
1 << INTEL_SDVO_LVDS_CLONE_BIT);
crt->base.cloneable = true;
if (IS_HASWELL(dev))
crt->base.crtc_mask = (1 << 0);
else
......@@ -677,6 +677,13 @@ void intel_crt_init(struct drm_device *dev)
else
encoder_helper_funcs = &gmch_encoder_funcs;
if (HAS_PCH_SPLIT(dev))
crt->adpa_reg = PCH_ADPA;
else if (IS_VALLEYVIEW(dev))
crt->adpa_reg = VLV_ADPA;
else
crt->adpa_reg = ADPA;
drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
......
......@@ -250,7 +250,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
case PORT_B:
case PORT_C:
case PORT_D:
intel_hdmi_init(dev, DDI_BUF_CTL(port));
intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
break;
default:
DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
......@@ -267,7 +267,8 @@ struct wrpll_tmds_clock {
u16 r2; /* Reference divider */
};
/* Table of matching values for WRPLL clocks programming for each frequency */
/* Table of matching values for WRPLL clocks programming for each frequency.
* The code assumes this table is sorted. */
static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{19750, 38, 25, 18},
{20000, 48, 32, 18},
......@@ -277,7 +278,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{23000, 36, 23, 15},
{23500, 40, 40, 23},
{23750, 26, 16, 14},
{23750, 26, 16, 14},
{24000, 36, 24, 15},
{25000, 36, 25, 15},
{25175, 26, 40, 33},
......@@ -437,7 +437,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{108000, 8, 24, 15},
{108108, 8, 173, 108},
{109000, 6, 23, 19},
{109000, 6, 23, 19},
{110000, 6, 22, 18},
{110013, 6, 22, 18},
{110250, 8, 49, 30},
......@@ -614,7 +613,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{218250, 4, 42, 26},
{218750, 4, 34, 21},
{219000, 4, 47, 29},
{219000, 4, 47, 29},
{220000, 4, 44, 27},
{220640, 4, 49, 30},
{220750, 4, 36, 22},
......@@ -658,7 +656,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int port = intel_hdmi->ddi_port;
int pipe = intel_crtc->pipe;
int p, n2, r2, valid=0;
int p, n2, r2;
u32 temp, i;
/* On Haswell, we need to enable the clocks and prepare DDI function to
......@@ -666,26 +664,23 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
*/
DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
p = wrpll_tmds_clock_table[i].p;
n2 = wrpll_tmds_clock_table[i].n2;
r2 = wrpll_tmds_clock_table[i].r2;
for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
break;
DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
crtc->mode.clock,
p, n2, r2);
if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
i--;
valid = 1;
break;
}
}
p = wrpll_tmds_clock_table[i].p;
n2 = wrpll_tmds_clock_table[i].n2;
r2 = wrpll_tmds_clock_table[i].r2;
if (!valid) {
DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
crtc->mode.clock);
return;
}
if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
crtc->mode.clock, p, n2, r2);
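/*
 * The table above is sorted by ascending clock: the loop picks the first
 * entry whose clock is >= the requested mode clock and clamps to the last
 * row when the mode is faster than anything listed. Worked example
 * (illustrative): a 108050KHz mode skips the {108000, 8, 24, 15} row and
 * settles on {108108, 8, 173, 108}; since the clocks differ, the DRM_INFO
 * above is emitted as well.
 */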
/* Enable LCPLL if disabled */
temp = I915_READ(LCPLL_CTL);
......@@ -723,15 +718,35 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
}
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = I915_READ(DDI_FUNC_CTL(pipe));
temp &= ~PIPE_DDI_PORT_MASK;
temp &= ~PIPE_DDI_BPC_12;
temp |= PIPE_DDI_SELECT_PORT(port) |
PIPE_DDI_MODE_SELECT_HDMI |
((intel_crtc->bpp > 24) ?
PIPE_DDI_BPC_12 :
PIPE_DDI_BPC_8) |
PIPE_DDI_FUNC_ENABLE;
temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
switch (intel_crtc->bpp) {
case 18:
temp |= PIPE_DDI_BPC_6;
break;
case 24:
temp |= PIPE_DDI_BPC_8;
break;
case 30:
temp |= PIPE_DDI_BPC_10;
break;
case 36:
temp |= PIPE_DDI_BPC_12;
break;
default:
WARN(1, "%d bpp unsupported by pipe DDI function\n",
intel_crtc->bpp);
}
if (intel_hdmi->has_hdmi_sink)
temp |= PIPE_DDI_MODE_SELECT_HDMI;
else
temp |= PIPE_DDI_MODE_SELECT_DVI;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
temp |= PIPE_DDI_PVSYNC;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
temp |= PIPE_DDI_PHSYNC;
I915_WRITE(DDI_FUNC_CTL(pipe), temp);
......
......@@ -36,42 +36,10 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_dp_helper.h"
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
#define DP_LINK_CONFIGURATION_SIZE 9
struct intel_dp {
struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
uint32_t color_range;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
struct edid *edid; /* cached EDID for eDP */
int edid_mode_count;
};
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
......@@ -1668,6 +1636,45 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
break;
case DP_TRAINING_PATTERN_1:
dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
break;
case DP_TRAINING_PATTERN_2:
dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
} else {
dp_reg_value &= ~DP_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
dp_reg_value |= DP_LINK_TRAIN_OFF;
break;
case DP_TRAINING_PATTERN_1:
dp_reg_value |= DP_LINK_TRAIN_PAT_1;
break;
case DP_TRAINING_PATTERN_2:
dp_reg_value |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
dp_reg_value |= DP_LINK_TRAIN_PAT_2;
break;
}
}
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
......@@ -1675,12 +1682,15 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET,
intel_dp->train_set,
intel_dp->lane_count);
if (ret != intel_dp->lane_count)
return false;
if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
DP_TRAINING_PATTERN_DISABLE) {
ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET,
intel_dp->train_set,
intel_dp->lane_count);
if (ret != intel_dp->lane_count)
return false;
}
return true;
}
......@@ -1696,7 +1706,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
/*
......@@ -1717,10 +1726,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP |= DP_PORT_EN;
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
voltage_tries = 0;
......@@ -1744,12 +1749,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
if (!intel_dp_set_link_train(intel_dp, reg,
if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE))
break;
......@@ -1804,10 +1804,8 @@ static void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool channel_eq = false;
int tries, cr_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
/* channel equalization */
......@@ -1836,13 +1834,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, reg,
if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_2 |
DP_LINK_SCRAMBLING_DISABLE))
break;
......@@ -1877,15 +1870,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
I915_WRITE(intel_dp->output_reg, reg);
POSTING_READ(intel_dp->output_reg);
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
static void
......@@ -2441,7 +2426,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
}
void
intel_dp_init(struct drm_device *dev, int output_reg)
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
......@@ -2456,6 +2441,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
return;
intel_dp->output_reg = output_reg;
intel_dp->port = port;
intel_dp->dpms_mode = -1;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
......@@ -2483,18 +2469,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
connector->polled = DRM_CONNECTOR_POLL_HPD;
if (output_reg == DP_B || output_reg == PCH_DP_B)
intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
else if (output_reg == DP_C || output_reg == PCH_DP_C)
intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
intel_encoder->cloneable = false;
if (is_edp(intel_dp)) {
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
ironlake_panel_vdd_work);
}
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
ironlake_panel_vdd_work);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
......@@ -2509,28 +2487,25 @@ intel_dp_init(struct drm_device *dev, int output_reg)
drm_sysfs_connector_add(connector);
/* Set up the DDC bus. */
switch (output_reg) {
case DP_A:
name = "DPDDC-A";
break;
case DP_B:
case PCH_DP_B:
dev_priv->hotplug_supported_mask |=
DPB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case DP_C:
case PCH_DP_C:
dev_priv->hotplug_supported_mask |=
DPC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case DP_D:
case PCH_DP_D:
dev_priv->hotplug_supported_mask |=
DPD_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
switch (port) {
case PORT_A:
name = "DPDDC-A";
break;
case PORT_B:
dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case PORT_C:
dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case PORT_D:
dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
default:
WARN(1, "Invalid port %c\n", port_name(port));
break;
}
intel_dp_i2c_init(intel_dp, intel_connector, name);
......
......@@ -31,6 +31,7 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "drm_dp_helper.h"
#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
......@@ -90,25 +91,6 @@
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
/* Intel Pipe Clone Bit */
#define INTEL_HDMIB_CLONE_BIT 1
#define INTEL_HDMIC_CLONE_BIT 2
#define INTEL_HDMID_CLONE_BIT 3
#define INTEL_HDMIE_CLONE_BIT 4
#define INTEL_HDMIF_CLONE_BIT 5
#define INTEL_SDVO_NON_TV_CLONE_BIT 6
#define INTEL_SDVO_TV_CLONE_BIT 7
#define INTEL_SDVO_LVDS_CLONE_BIT 8
#define INTEL_ANALOG_CLONE_BIT 9
#define INTEL_TV_CLONE_BIT 10
#define INTEL_DP_B_CLONE_BIT 11
#define INTEL_DP_C_CLONE_BIT 12
#define INTEL_DP_D_CLONE_BIT 13
#define INTEL_LVDS_CLONE_BIT 14
#define INTEL_DVO_TMDS_CLONE_BIT 15
#define INTEL_DVO_LVDS_CLONE_BIT 16
#define INTEL_EDP_CLONE_BIT 17
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
......@@ -153,9 +135,13 @@ struct intel_encoder {
struct drm_encoder base;
int type;
bool needs_tv_clock;
/*
 * Intel hw has only one MUX where encoders could be cloned, hence a
 * simple flag is enough to compute the possible_clones mask (a short
 * sketch of that computation follows this struct).
 */
bool cloneable;
void (*hot_plug)(struct intel_encoder *);
int crtc_mask;
int clone_mask;
};
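The cloneable flag above replaces the old per-encoder clone-bit defines. A minimal sketch of how a possible_clones bitmask could be derived from it, assuming the helper name and loop below, which are illustrative and not part of this series:

/*
 * Hypothetical helper (sketch only): with a single cloneable flag per
 * encoder, an encoder may share its crtc with every other encoder that
 * is also cloneable, and trivially with itself.
 */
static u32 possible_clones_sketch(struct drm_device *dev,
				  struct intel_encoder *encoder)
{
	struct intel_encoder *other;
	u32 index_mask = 0;
	int entry = 0;

	list_for_each_entry(other, &dev->mode_config.encoder_list,
			    base.head) {
		if (other == encoder ||
		    (encoder->cloneable && other->cloneable))
			index_mask |= (1 << entry);
		entry++;
	}

	return index_mask;
}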
struct intel_connector {
......@@ -171,8 +157,6 @@ struct intel_crtc {
int dpms_mode;
bool active; /* is the crtc on? independent of the dpms mode */
bool primary_disabled; /* is the crtc obscured by a plane? */
bool busy; /* is scanout buffer being updated frequently? */
struct timer_list idle_timer;
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
......@@ -311,6 +295,38 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_LINK_CONFIGURATION_SIZE 9
struct intel_dp {
struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
enum port port;
uint32_t color_range;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
struct edid *edid; /* cached EDID for eDP */
int edid_mode_count;
};
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
......@@ -348,17 +364,21 @@ extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
extern void intel_hdmi_init(struct drm_device *dev,
int sdvox_reg, enum port port);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev,
struct drm_i915_gem_object *obj);
extern void intel_mark_busy(struct drm_device *dev);
extern void intel_mark_idle(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
......@@ -371,8 +391,6 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
void intel_sanitize_pm(struct drm_device *dev);
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
......
......@@ -37,6 +37,7 @@
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
#define NS2501_ADDR 0x38
static const struct intel_dvo_device intel_dvo_devices[] = {
{
......@@ -74,7 +75,14 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.slave_addr = 0x75,
.gpio = GMBUS_PORT_DPB,
.dev_ops = &ch7017_ops,
}
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ns2501",
.dvo_reg = DVOC,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
}
};
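For context, a simplified and hypothetical sketch of how this table is consumed: each entry's ->dev_ops->init() is tried on its i2c slave address until a chip answers. The helper name and the GMBUS_PORT_DPB fallback are assumptions of this sketch; the real probe in intel_dvo_init() also handles GPIO selection and encoder setup.

static const struct intel_dvo_device *
probe_dvo_sketch(struct drm_i915_private *dev_priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
		struct intel_dvo_device dvo = intel_dvo_devices[i];
		/* Assumption for this sketch: fall back to the DPB gmbus
		 * pin when an entry does not name one. */
		struct i2c_adapter *i2c =
			intel_gmbus_get_adapter(dev_priv,
						dvo.gpio ? dvo.gpio
							 : GMBUS_PORT_DPB);

		if (dvo.dev_ops->init(&dvo, i2c))
			return &intel_dvo_devices[i];
	}

	return NULL;
}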
struct intel_dvo {
......@@ -396,17 +404,14 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
intel_encoder->clone_mask =
(1 << INTEL_DVO_TMDS_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
intel_encoder->cloneable = true;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
intel_encoder->clone_mask =
(1 << INTEL_DVO_LVDS_CLONE_BIT);
intel_encoder->cloneable = false;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
......
......@@ -889,7 +889,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_broadcast_rgb_property(connector);
}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
......@@ -923,48 +923,25 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
/* Set up the DDC bus. */
if (sdvox_reg == SDVOB) {
intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == SDVOC) {
intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIB) {
intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIC) {
intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMID) {
intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
intel_encoder->cloneable = false;
intel_hdmi->ddi_port = port;
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
intel_hdmi->ddi_port = PORT_B;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
break;
case PORT_C:
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
intel_hdmi->ddi_port = PORT_C;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
break;
case PORT_D:
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
intel_hdmi->ddi_port = PORT_D;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
} else {
/* If we got an unknown sdvox_reg, things are pretty much broken
* in a way that we should let the kernel know about it */
break;
case PORT_A:
/* Internal port only for eDP. */
default:
BUG();
}
......
......@@ -967,7 +967,7 @@ bool intel_lvds_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->cloneable = false;
if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
else if (IS_GEN4(dev))
......
This diff is collapsed.
......@@ -218,11 +218,6 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
ret = intel_emit_post_sync_nonzero_flush(ring);
if (ret)
return ret;
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
......@@ -262,6 +257,20 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
static int
gen6_render_ring_flush__wa(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
ret = intel_emit_post_sync_nonzero_flush(ring);
if (ret)
return ret;
return gen6_render_ring_flush(ring, invalidate_domains, flush_domains);
}
static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
......@@ -462,7 +471,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (IS_IVYBRIDGE(dev))
if (HAS_L3_GPU_CACHE(dev))
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
return ret;
......@@ -628,26 +637,24 @@ pc_render_add_request(struct intel_ring_buffer *ring,
}
static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
struct drm_device *dev = ring->dev;
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page. */
if (IS_GEN6(dev) || IS_GEN7(dev))
if (!lazy_coherency)
intel_ring_get_active_head(ring);
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static u32
ring_get_seqno(struct intel_ring_buffer *ring)
ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
struct pipe_control *pc = ring->private;
return pc->cpu_page[0];
......@@ -852,7 +859,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
if (IS_IVYBRIDGE(dev) && ring->id == RCS)
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
GEN6_RENDER_L3_PARITY_ERROR));
else
......@@ -875,7 +882,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
if (IS_IVYBRIDGE(dev) && ring->id == RCS)
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
else
I915_WRITE_IMR(ring, ~0);
......@@ -1010,7 +1017,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = 32 * PAGE_SIZE;
init_waitqueue_head(&ring->irq_queue);
......@@ -1380,6 +1386,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
ring->flush = gen6_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
ring->flush = gen6_render_ring_flush__wa;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT;
......@@ -1481,7 +1489,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = size;
ring->effective_size = ring->size;
......@@ -1574,3 +1581,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
return intel_init_ring_buffer(dev, ring);
}
int
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
{
int ret;
if (!ring->gpu_caches_dirty)
return 0;
ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
ring->gpu_caches_dirty = false;
return 0;
}
int
intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
{
uint32_t flush_domains;
int ret;
flush_domains = 0;
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
ring->gpu_caches_dirty = false;
return 0;
}
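A hedged sketch of the intended call pattern around a batch submission; the function name and flow below are illustrative only, the real callers live in the execbuffer and request paths:

static int submit_batch_sketch(struct intel_ring_buffer *ring,
			       u32 batch_offset, u32 batch_len)
{
	int ret;

	/* Drop stale read caches before the GPU consumes new state. */
	ret = intel_ring_invalidate_all_caches(ring);
	if (ret)
		return ret;

	ret = ring->dispatch_execbuffer(ring, batch_offset, batch_len);
	if (ret)
		return ret;

	/* The batch may have written through the GPU caches; mark them
	 * dirty so the next flush_all_caches call emits a real flush. */
	ring->gpu_caches_dirty = true;

	return intel_ring_flush_all_caches(ring);
}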
......@@ -72,7 +72,14 @@ struct intel_ring_buffer {
u32 flush_domains);
int (*add_request)(struct intel_ring_buffer *ring,
u32 *seqno);
u32 (*get_seqno)(struct intel_ring_buffer *ring);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
* seen value is good enough. Note that the seqno will always be
* monotonic, even if not coherent.
*/
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
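	/*
	 * Illustrative use (sketch, not part of this diff): a waiter can
	 * take the cheap path first and only pay for a coherent read
	 * before sleeping, e.g.
	 *
	 *   if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
	 *           return true;                       <- stale value sufficed
	 *   seqno_now = ring->get_seqno(ring, false);  <- forced coherent read
	 *
	 * i915_seqno_passed() is the existing wrap-safe comparison helper.
	 */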
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
......@@ -100,15 +107,6 @@ struct intel_ring_buffer {
*/
struct list_head request_list;
/**
* List of objects currently pending a GPU write flush.
*
* All elements on this list will belong to either the
* active_list or flushing_list, last_rendering_seqno can
* be used to differentiate between the two elements.
*/
struct list_head gpu_write_list;
/**
* Do we have some not yet emitted requests outstanding?
*/
......@@ -204,6 +202,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
void intel_ring_advance(struct intel_ring_buffer *ring);
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
......
This diff is collapsed.
......@@ -1622,7 +1622,7 @@ intel_tv_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
intel_encoder->cloneable = false;
intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
......
This diff is collapsed.