Commit 66fd7a66 authored by Dave Airlie

Merge branch 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2016-05-22:
- cmd-parser support for direct reg->reg loads (Ken Graunke)
- better handle DP++ smart dongles (Ville)
- bxt guc fw loading support (Nick Hoath)
- remove a bunch of struct typedefs from dpll code (Ander)
- tons of small work all over to avoid casting between drm_device and the i915
  dev struct (Tvrtko&Chris)
- untangle request retiring from other operations, also fixes reset stat corner
  cases (Chris)
- skl atomic watermark support from Matt Roper, yay!
- various wm handling bugfixes from Ville
- big pile of cdclk rework for bxt/skl (Ville)
- CABC (Content Adaptive Brightness Control) for dsi panels (Jani&Deepak M)
- nonblocking atomic commits for plane-only updates (Maarten Lankhorst)
- bunch of PSR fixes&improvements
- untangle our map/pin/sg_iter code a bit (Dave Gordon)
drm-intel-next-2016-05-08:
- refactor stolen quirks to share code between early quirks and i915 (Joonas)
- refactor gem BO/vma functions (Tvrtko&Dave)
- backlight over DPCD support (Yetunde Abedisi)
- more dsi panel sequence support (Jani)
- lots of refactoring around handling iomaps, vma, ring access and related
  topics culminating in removing the duplicated request tracking in the execlist
  code (Chris & Tvrtko) includes a small patch for core iomapping code
- hw state readout for bxt dsi (Ramalingam C)
- cdclk cleanups (Ville)
- dedupe chv pll code a bit (Ander)
- enable semaphores on gen8+ for legacy submission, to be able to have a direct
  comparison against execlist on the same platform (Chris) Not meant to be used
  for anything else but performance tuning
- lvds border bit hw state checker fix (Jani)
- rpm vs. shrinker/oom-notifier fixes (Praveen Paneri)
- l3 tuning (Imre)
- revert mst dp audio, it's totally non-functional and crash-y (Lyude)
- first official dmc for kbl (Rodrigo)
- and tons of small things all over as usual

* 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel: (194 commits)
  drm/i915: Revert async unpin and nonblocking atomic commit
  drm/i915: Update DRIVER_DATE to 20160522
  drm/i915: Inline sg_next() for the optimised SGL iterator
  drm/i915: Introduce & use new lightweight SGL iterators
  drm/i915: optimise i915_gem_object_map() for small objects
  drm/i915: refactor i915_gem_object_pin_map()
  drm/i915/psr: Implement PSR2 w/a for gen9
  drm/i915/psr: Use ->get_aux_send_ctl functions
  drm/i915/psr: Order DP aux transactions correctly
  drm/i915/psr: Make idle_frames sensible again
  drm/i915/psr: Try to program link training times correctly
  drm/i915/userptr: Convert to drm_i915_private
  drm/i915: Allow nonblocking update of pageflips.
  drm/i915: Check for unpin correctness.
  Reapply "drm/i915: Avoid stalling on pending flips for legacy cursor updates"
  drm/i915: Make unpin async.
  drm/i915: Prepare connectors for nonblocking checks.
  drm/i915: Pass atomic states to fbc update functions.
  drm/i915: Remove reset_counter from intel_crtc.
  drm/i915: Remove queue_flip pointer.
  ...
parents 65439b68 e42aeef1
This diff is collapsed.
@@ -242,6 +242,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
 	store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
 }
 
+/**
+ * drm_accurate_vblank_count - retrieve the master vblank counter
+ * @crtc: which counter to retrieve
+ *
+ * This function is similar to @drm_crtc_vblank_count but this
+ * function interpolates to handle a race with vblank irq's.
+ *
+ * This is mostly useful for hardware that can obtain the scanout
+ * position, but doesn't have a frame counter.
+ */
+u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = drm_crtc_index(crtc);
+	u32 vblank;
+	unsigned long flags;
+
+	WARN(!dev->driver->get_vblank_timestamp,
+	     "This function requires support for accurate vblank timestamps.");
+
+	spin_lock_irqsave(&dev->vblank_time_lock, flags);
+
+	drm_update_vblank_count(dev, pipe, 0);
+	vblank = drm_vblank_count(dev, pipe);
+
+	spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
+
+	return vblank;
+}
+EXPORT_SYMBOL(drm_accurate_vblank_count);
+
 /*
  * Disable vblank irq's on crtc, make sure that last vblank count
  * of hardware and corresponding consistent software vblank counter
...
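As a usage note for the new export above: a minimal caller sketch (hypothetical; the name example_log_vblank is illustrative) for hardware that reports the scanout position but has no frame counter:

	static void example_log_vblank(struct drm_crtc *crtc)
	{
		/* Interpolated count; consistent even when racing the vblank irq. */
		u32 frame = drm_accurate_vblank_count(crtc);

		DRM_DEBUG("crtc %u at vblank %u\n", drm_crtc_index(crtc), frame);
	}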
@@ -59,6 +59,7 @@ i915-y += intel_audio.o \
 	intel_bios.o \
 	intel_color.o \
 	intel_display.o \
+	intel_dpio_phy.o \
 	intel_dpll_mgr.o \
 	intel_fbc.o \
 	intel_fifo_underrun.o \
@@ -81,10 +82,12 @@ i915-y += dvo_ch7017.o \
 	dvo_tfp410.o \
 	intel_crt.o \
 	intel_ddi.o \
+	intel_dp_aux_backlight.o \
 	intel_dp_link_training.o \
 	intel_dp_mst.o \
 	intel_dp.o \
 	intel_dsi.o \
+	intel_dsi_dcs_backlight.o \
 	intel_dsi_panel_vbt.o \
 	intel_dsi_pll.o \
 	intel_dvo.o \
...
@@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
 	CMD(  MI_RS_CONTEXT,            SMI,    F,  1,      S  ),
 	CMD(  MI_LOAD_SCAN_LINES_INCL,  SMI,   !F,  0x3F,   M  ),
 	CMD(  MI_LOAD_SCAN_LINES_EXCL,  SMI,   !F,  0x3F,   R  ),
-	CMD(  MI_LOAD_REGISTER_REG,     SMI,   !F,  0xFF,   R  ),
+	CMD(  MI_LOAD_REGISTER_REG,     SMI,   !F,  0xFF,   W,
+	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),
 	CMD(  MI_RS_STORE_DATA_IMM,     SMI,   !F,  0xFF,   S  ),
 	CMD(  MI_LOAD_URB_MEM,          SMI,   !F,  0xFF,   S  ),
 	CMD(  MI_STORE_URB_MEM,         SMI,   !F,  0xFF,   S  ),
@@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 	int cmd_table_count;
 	int ret;
 
-	if (!IS_GEN7(engine->dev))
+	if (!IS_GEN7(engine->i915))
 		return 0;
 
 	switch (engine->id) {
 	case RCS:
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine->i915)) {
 			cmd_tables = hsw_render_ring_cmds;
 			cmd_table_count =
 				ARRAY_SIZE(hsw_render_ring_cmds);
@@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
 		}
 
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine->i915)) {
 			engine->reg_tables = hsw_render_reg_tables;
 			engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
 		} else {
@@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case BCS:
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine->i915)) {
 			cmd_tables = hsw_blt_ring_cmds;
 			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
 		} else {
@@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
 		}
 
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine->i915)) {
 			engine->reg_tables = hsw_blt_reg_tables;
 			engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
 		} else {
@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
 	if (!engine->needs_cmd_parser)
 		return false;
 
-	if (!USES_PPGTT(engine->dev))
+	if (!USES_PPGTT(engine->i915))
 		return false;
 
 	return (i915.enable_cmd_parser == 1);
@@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 			return false;
 		}
 
+		if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
+			DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
+			return false;
+		}
+
 		if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
 			*oacontrol_set = (cmd[offset + 1] != 0);
 	}
@@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 			return false;
 		}
 
+		if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
+			DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
+					 reg_addr);
+			return false;
+		}
+
 		if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
 		    (offset + 2 > length ||
 		     (cmd[offset + 1] & reg->mask) != reg->value)) {
@@ -1275,8 +1287,21 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
  *
  * Return: the current version number of the cmd parser
  */
-int i915_cmd_parser_get_version(void)
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 {
+	struct intel_engine_cs *engine;
+	bool active = false;
+
+	/* If the command parser is not enabled, report 0 - unsupported */
+	for_each_engine(engine, dev_priv) {
+		if (i915_needs_cmd_parser(engine)) {
+			active = true;
+			break;
+		}
+	}
+	if (!active)
+		return 0;
+
 	/*
 	 * Command parser version history
 	 *
@@ -1288,6 +1313,7 @@ int i915_cmd_parser_get_version(void)
 	 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
 	 * 5. GPGPU dispatch compute indirect registers.
 	 * 6. TIMESTAMP register and Haswell CS GPR registers
+	 * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
 	 */
-	return 6;
+	return 7;
 }
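Userspace sees the version bump through the existing getparam interface (rewired to the new signature in i915_dma.c below). A sketch of such a probe, assuming only the stock I915_PARAM_CMD_PARSER_VERSION ABI; query_cmd_parser_version is an illustrative name:

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Returns the parser version, 0 if inactive on all engines, -1 on error. */
	static int query_cmd_parser_version(int drm_fd)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_CMD_PARSER_VERSION,
			.value = &value,
		};

		if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;

		return value;	/* >= 7: LRR between whitelisted registers is allowed */
	}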
@@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	return 0;
 }
 
-static const char get_active_flag(struct drm_i915_gem_object *obj)
+static char get_active_flag(struct drm_i915_gem_object *obj)
 {
 	return obj->active ? '*' : ' ';
 }
 
-static const char get_pin_flag(struct drm_i915_gem_object *obj)
+static char get_pin_flag(struct drm_i915_gem_object *obj)
 {
 	return obj->pin_display ? 'p' : ' ';
 }
 
-static const char get_tiling_flag(struct drm_i915_gem_object *obj)
+static char get_tiling_flag(struct drm_i915_gem_object *obj)
 {
 	switch (obj->tiling_mode) {
 	default:
@@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj)
 	}
 }
 
-static inline const char get_global_flag(struct drm_i915_gem_object *obj)
+static char get_global_flag(struct drm_i915_gem_object *obj)
 {
 	return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
 }
 
-static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
+static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
 {
 	return obj->mapping ? 'M' : ' ';
 }
@@ -607,18 +607,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 	for_each_intel_crtc(dev, crtc) {
 		const char pipe = pipe_name(crtc->pipe);
 		const char plane = plane_name(crtc->plane);
-		struct intel_unpin_work *work;
+		struct intel_flip_work *work;
 
 		spin_lock_irq(&dev->event_lock);
-		work = crtc->unpin_work;
+		work = crtc->flip_work;
 		if (work == NULL) {
 			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
 				   pipe, plane);
 		} else {
+			u32 pending;
 			u32 addr;
 
-			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
-				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
+			pending = atomic_read(&work->pending);
+			if (pending) {
+				seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
 					   pipe, plane);
 			} else {
 				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
@@ -638,11 +640,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
 				   work->flip_queued_vblank,
 				   work->flip_ready_vblank,
-				   drm_crtc_vblank_count(&crtc->base));
-			if (work->enable_stall_check)
-				seq_puts(m, "Stall check enabled, ");
-			else
-				seq_puts(m, "Stall check waiting for page flip ioctl, ");
+				   intel_crtc_get_vblank_counter(crtc));
 			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
 			if (INTEL_INFO(dev)->gen >= 4)
@@ -1383,7 +1381,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 		seqno[id] = engine->get_seqno(engine);
 	}
 
-	i915_get_extra_instdone(dev, instdone);
+	i915_get_extra_instdone(dev_priv, instdone);
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -2004,7 +2002,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		    ctx->legacy_hw_ctx.rcs_state == NULL)
 			continue;
 
-		seq_puts(m, "HW context ");
+		seq_printf(m, "HW context %u ", ctx->hw_id);
 		describe_ctx(m, ctx);
 		if (ctx == dev_priv->kernel_context)
 			seq_printf(m, "(kernel context) ");
@@ -2046,15 +2044,13 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	unsigned long ggtt_offset = 0;
 
+	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
+
 	if (ctx_obj == NULL) {
-		seq_printf(m, "Context on %s with no gem object\n",
-			   engine->name);
+		seq_puts(m, "\tNot allocated\n");
 		return;
 	}
 
-	seq_printf(m, "CONTEXT: %s %u\n", engine->name,
-		   intel_execlists_ctx_id(ctx, engine));
-
 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
 		seq_puts(m, "\tNot bound in GGTT\n");
 	else
@@ -2100,9 +2096,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 		return ret;
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link)
-		if (ctx != dev_priv->kernel_context)
-			for_each_engine(engine, dev_priv)
-				i915_dump_lrc_obj(m, ctx, engine);
+		for_each_engine(engine, dev_priv)
+			i915_dump_lrc_obj(m, ctx, engine);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -2173,8 +2168,8 @@ static int i915_execlists(struct seq_file *m, void *data)
 		seq_printf(m, "\t%d requests in queue\n", count);
 		if (head_req) {
-			seq_printf(m, "\tHead request id: %u\n",
-				   intel_execlists_ctx_id(head_req->ctx, engine));
+			seq_printf(m, "\tHead request context: %u\n",
+				   head_req->ctx->hw_id);
 			seq_printf(m, "\tHead request tail: %u\n",
 				   head_req->tail);
 		}
@@ -2313,12 +2308,12 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 
-	if (INTEL_INFO(dev)->gen == 6)
+	if (IS_GEN6(dev_priv))
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
 	for_each_engine(engine, dev_priv) {
 		seq_printf(m, "%s\n", engine->name);
-		if (INTEL_INFO(dev)->gen == 7)
+		if (IS_GEN7(dev_priv))
 			seq_printf(m, "GFX_MODE: 0x%08x\n",
 				   I915_READ(RING_MODE_GEN7(engine)));
 		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
@@ -3168,7 +3163,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 	enum intel_engine_id id;
 	int j, ret;
 
-	if (!i915_semaphore_is_enabled(dev)) {
+	if (!i915_semaphore_is_enabled(dev_priv)) {
 		seq_puts(m, "Semaphores are disabled\n");
 		return 0;
 	}
@@ -4769,7 +4764,7 @@ i915_wedged_set(void *data, u64 val)
 	intel_runtime_pm_get(dev_priv);
 
-	i915_handle_error(dev, val,
+	i915_handle_error(dev_priv, val,
 			  "Manually setting wedged to %llu", val);
 
 	intel_runtime_pm_put(dev_priv);
@@ -4919,7 +4914,7 @@ i915_drop_caches_set(void *data, u64 val)
 	}
 
 	if (val & (DROP_RETIRE | DROP_ACTIVE))
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev_priv);
 
 	if (val & DROP_BOUND)
 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
@@ -4993,7 +4988,7 @@ i915_max_freq_set(void *data, u64 val)
 	dev_priv->rps.max_freq_softlimit = val;
 
-	intel_set_rps(dev, val);
+	intel_set_rps(dev_priv, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5060,7 +5055,7 @@ i915_min_freq_set(void *data, u64 val)
 	dev_priv->rps.min_freq_softlimit = val;
 
-	intel_set_rps(dev, val);
+	intel_set_rps(dev_priv, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
...
@@ -186,7 +186,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = 1;
 		break;
 	case I915_PARAM_HAS_SEMAPHORES:
-		value = i915_semaphore_is_enabled(dev);
+		value = i915_semaphore_is_enabled(dev_priv);
 		break;
 	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
 		value = 1;
@@ -204,7 +204,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = 1;
 		break;
 	case I915_PARAM_CMD_PARSER_VERSION:
-		value = i915_cmd_parser_get_version();
+		value = i915_cmd_parser_get_version(dev_priv);
 		break;
 	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
 		value = 1;
@@ -223,8 +223,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 			return -ENODEV;
 		break;
 	case I915_PARAM_HAS_GPU_RESET:
-		value = i915.enable_hangcheck &&
-			intel_has_gpu_reset(dev);
+		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
 		break;
 	case I915_PARAM_HAS_RESOURCE_STREAMER:
 		value = HAS_RESOURCE_STREAMER(dev);
@@ -425,6 +424,43 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
 	.can_switch = i915_switcheroo_can_switch,
 };
 
+static void i915_gem_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	/*
+	 * Neither the BIOS, ourselves or any other kernel
+	 * expects the system to be in execlists mode on startup,
+	 * so we need to reset the GPU back to legacy mode. And the only
+	 * known way to disable logical contexts is through a GPU reset.
+	 *
+	 * So in order to leave the system in a known default configuration,
+	 * always reset the GPU upon unload. Afterwards we then clean up the
+	 * GEM state tracking, flushing off the requests and leaving the
+	 * system in a known idle state.
+	 *
+	 * Note that is of the upmost importance that the GPU is idle and
+	 * all stray writes are flushed *before* we dismantle the backing
+	 * storage for the pinned objects.
+	 *
+	 * However, since we are uncertain that reseting the GPU on older
+	 * machines is a good idea, we don't - just in case it leaves the
+	 * machine in an unusable condition.
+	 */
+	if (HAS_HW_CONTEXTS(dev)) {
+		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
+		WARN_ON(reset && reset != -ENODEV);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_reset(dev);
+	i915_gem_cleanup_engines(dev);
+	i915_gem_context_fini(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	WARN_ON(!list_empty(&to_i915(dev)->context_list));
+}
+
 static int i915_load_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -454,6 +490,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_vga_client;
 
+	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
+	intel_update_rawclk(dev_priv);
+
 	intel_power_domains_init_hw(dev_priv, false);
 
 	intel_csr_ucode_init(dev_priv);
@@ -503,10 +542,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	return 0;
 
 cleanup_gem:
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_cleanup_engines(dev);
-	i915_gem_context_fini(dev);
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_fini(dev);
 cleanup_irq:
 	intel_guc_ucode_fini(dev);
 	drm_irq_uninstall(dev);
@@ -850,7 +886,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 		DRM_INFO("Display disabled (module parameter)\n");
 		info->num_pipes = 0;
 	} else if (info->num_pipes > 0 &&
-		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
+		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
 		   HAS_PCH_SPLIT(dev)) {
 		u32 fuse_strap = I915_READ(FUSE_STRAP);
 		u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -874,7 +910,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 			DRM_INFO("PipeC fused off\n");
 			info->num_pipes -= 1;
 		}
-	} else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
+	} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
 		u32 dfsm = I915_READ(SKL_DFSM);
 		u8 disabled_mask = 0;
 		bool invalid;
@@ -915,9 +951,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 	else if (INTEL_INFO(dev)->gen >= 9)
 		gen9_sseu_info_init(dev);
 
-	/* Snooping is broken on BXT A stepping. */
 	info->has_snoop = !info->has_llc;
-	info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
+
+	/* Snooping is broken on BXT A stepping. */
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+		info->has_snoop = false;
 
 	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
 	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
@@ -930,6 +968,20 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 			 info->has_subslice_pg ? "y" : "n");
 	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
 			 info->has_eu_pg ? "y" : "n");
+
+	i915.enable_execlists =
+		intel_sanitize_enable_execlists(dev_priv,
+						i915.enable_execlists);
+
+	/*
+	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
+	 * user's requested state against the hardware/driver capabilities. We
+	 * do this now so that we can print out any log messages once rather
+	 * than every time we check intel_enable_ppgtt().
+	 */
+	i915.enable_ppgtt =
+		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
+	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 }
 
 static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -1020,6 +1072,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	memcpy(device_info, info, sizeof(dev_priv->info));
 	device_info->device_id = dev->pdev->device;
 
+	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+	device_info->gen_mask = BIT(device_info->gen - 1);
+
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
 	mutex_init(&dev_priv->backlight_lock);
@@ -1137,7 +1192,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 	if (ret < 0)
 		goto put_bridge;
 
-	intel_uncore_init(dev);
+	intel_uncore_init(dev_priv);
 
 	return 0;
 
@@ -1155,7 +1210,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 
-	intel_uncore_fini(dev);
+	intel_uncore_fini(dev_priv);
 	i915_mmio_cleanup(dev);
 	pci_dev_put(dev_priv->bridge_dev);
 }
@@ -1206,8 +1261,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	pci_set_master(dev->pdev);
 
 	/* overlay on gen2 is broken and can't address above 1G */
-	if (IS_GEN2(dev))
-		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+	if (IS_GEN2(dev)) {
+		ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+		if (ret) {
+			DRM_ERROR("failed to set DMA mask\n");
+			goto out_ggtt;
+		}
+	}
 
 	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
 	 * using 32bit addressing, overwriting memory if HWS is located
@@ -1217,8 +1279,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	 * behaviour if any general state is accessed within a page above 4GB,
 	 * which also needs to be handled carefully.
 	 */
-	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
-		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+		ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+		if (ret) {
+			DRM_ERROR("failed to set DMA mask\n");
+			goto out_ggtt;
+		}
+	}
 
 	aperture_size = ggtt->mappable_end;
 
@@ -1236,7 +1305,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
 			   PM_QOS_DEFAULT_VALUE);
 
-	intel_uncore_sanitize(dev);
+	intel_uncore_sanitize(dev_priv);
 
 	intel_opregion_setup(dev);
@@ -1300,7 +1369,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 	 * Notify a valid surface after modesetting,
 	 * when running inside a VM.
 	 */
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
 
 	i915_setup_sysfs(dev);
@@ -1459,10 +1528,7 @@ int i915_driver_unload(struct drm_device *dev)
 	flush_workqueue(dev_priv->wq);
 
 	intel_guc_ucode_fini(dev);
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_cleanup_engines(dev);
-	i915_gem_context_fini(dev);
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_fini(dev);
 	intel_fbc_cleanup_cfb(dev_priv);
 
 	intel_power_domains_fini(dev_priv);
@@ -1570,15 +1636,15 @@ const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
 	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
...
@@ -298,22 +298,26 @@ static const struct intel_device_info intel_haswell_m_info = {
 static const struct intel_device_info intel_broadwell_d_info = {
 	BDW_FEATURES,
 	.gen = 8,
+	.is_broadwell = 1,
 };
 
 static const struct intel_device_info intel_broadwell_m_info = {
 	BDW_FEATURES,
 	.gen = 8, .is_mobile = 1,
+	.is_broadwell = 1,
 };
 
 static const struct intel_device_info intel_broadwell_gt3d_info = {
 	BDW_FEATURES,
 	.gen = 8,
+	.is_broadwell = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
 static const struct intel_device_info intel_broadwell_gt3m_info = {
 	BDW_FEATURES,
 	.gen = 8, .is_mobile = 1,
+	.is_broadwell = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
@@ -528,9 +532,9 @@ void intel_detect_pch(struct drm_device *dev)
 		pci_dev_put(pch);
 }
 
-bool i915_semaphore_is_enabled(struct drm_device *dev)
+bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
 {
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return false;
 
 	if (i915.semaphores >= 0)
@@ -540,13 +544,9 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	if (i915.enable_execlists)
 		return false;
 
-	/* Until we get further testing... */
-	if (IS_GEN8(dev))
-		return false;
-
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
-	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
 		return false;
 #endif
@@ -608,7 +608,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	intel_guc_suspend(dev);
 
-	intel_suspend_gt_powersave(dev);
+	intel_suspend_gt_powersave(dev_priv);
 
 	intel_display_suspend(dev);
 
@@ -628,7 +628,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
 	intel_opregion_notify_adapter(dev, opregion_target_state);
 
-	intel_uncore_forcewake_reset(dev, false);
+	intel_uncore_forcewake_reset(dev_priv, false);
 	intel_opregion_fini(dev);
 
 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -775,7 +775,7 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
+		dev_priv->display.hpd_irq_setup(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	intel_dp_mst_resume(dev);
@@ -868,9 +868,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
 		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
 			  ret);
 
-	intel_uncore_early_sanitize(dev, true);
+	intel_uncore_early_sanitize(dev_priv, true);
 
-	if (IS_BROXTON(dev)) {
+	if (IS_BROXTON(dev_priv)) {
 		if (!dev_priv->suspended_to_idle)
 			gen9_sanitize_dc_state(dev_priv);
 		bxt_disable_dc9(dev_priv);
@@ -878,7 +878,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 		hsw_disable_pc8(dev_priv);
 	}
 
-	intel_uncore_sanitize(dev);
+	intel_uncore_sanitize(dev_priv);
 
 	if (IS_BROXTON(dev_priv) ||
 	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
@@ -921,14 +921,14 @@ int i915_resume_switcheroo(struct drm_device *dev)
  *   - re-init interrupt state
  *   - re-init display
  */
-int i915_reset(struct drm_device *dev)
+int i915_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
 	struct i915_gpu_error *error = &dev_priv->gpu_error;
 	unsigned reset_counter;
 	int ret;
 
-	intel_reset_gt_powersave(dev);
+	intel_reset_gt_powersave(dev_priv);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -944,7 +944,7 @@ int i915_reset(struct drm_device *dev)
 
 	i915_gem_reset(dev);
 
-	ret = intel_gpu_reset(dev, ALL_ENGINES);
+	ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
 
 	/* Also reset the gpu hangman. */
 	if (error->stop_rings != 0) {
@@ -999,7 +999,7 @@ int i915_reset(struct drm_device *dev)
 	 * of re-init after reset.
 	 */
 	if (INTEL_INFO(dev)->gen > 5)
-		intel_enable_gt_powersave(dev);
+		intel_enable_gt_powersave(dev_priv);
 
 	return 0;
@@ -1107,6 +1107,49 @@ static int i915_pm_resume(struct device *dev)
 	return i915_drm_resume(drm_dev);
 }
 
+/* freeze: before creating the hibernation_image */
+static int i915_pm_freeze(struct device *dev)
+{
+	return i915_pm_suspend(dev);
+}
+
+static int i915_pm_freeze_late(struct device *dev)
+{
+	int ret;
+
+	ret = i915_pm_suspend_late(dev);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_freeze_late(dev_to_i915(dev));
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/* thaw: called after creating the hibernation image, but before turning off. */
+static int i915_pm_thaw_early(struct device *dev)
+{
+	return i915_pm_resume_early(dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+	return i915_pm_resume(dev);
+}
+
+/* restore: called after loading the hibernation image. */
+static int i915_pm_restore_early(struct device *dev)
+{
+	return i915_pm_resume_early(dev);
+}
+
+static int i915_pm_restore(struct device *dev)
+{
+	return i915_pm_resume(dev);
+}
+
 /*
  * Save all Gunit registers that may be lost after a D3 and a subsequent
  * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1470,7 +1513,7 @@ static int intel_runtime_suspend(struct device *device)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
+	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
 		return -ENODEV;
 
 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1509,7 +1552,7 @@ static int intel_runtime_suspend(struct device *device)
 
 	intel_guc_suspend(dev);
 
-	intel_suspend_gt_powersave(dev);
+	intel_suspend_gt_powersave(dev_priv);
 	intel_runtime_pm_disable_interrupts(dev_priv);
 
 	ret = 0;
@@ -1531,7 +1574,7 @@ static int intel_runtime_suspend(struct device *device)
 		return ret;
 	}
 
-	intel_uncore_forcewake_reset(dev, false);
+	intel_uncore_forcewake_reset(dev_priv, false);
 
 	enable_rpm_wakeref_asserts(dev_priv);
 	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@ -1612,7 +1655,7 @@ static int intel_runtime_resume(struct device *device)
 	 * we can do is to hope that things will still work (and disable RPM).
 	 */
 	i915_gem_init_swizzling(dev);
-	gen6_update_ring_freq(dev);
+	gen6_update_ring_freq(dev_priv);
 
 	intel_runtime_pm_enable_interrupts(dev_priv);
 
@@ -1624,7 +1667,7 @@ static int intel_runtime_resume(struct device *device)
 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
 		intel_hpd_init(dev_priv);
 
-	intel_enable_gt_powersave(dev);
+	intel_enable_gt_powersave(dev_priv);
 
 	enable_rpm_wakeref_asserts(dev_priv);
 
@@ -1661,14 +1704,14 @@ static const struct dev_pm_ops i915_pm_ops = {
 	 * @restore, @restore_early : called after rebooting and restoring the
 	 * hibernation image [PMSG_RESTORE]
 	 */
-	.freeze = i915_pm_suspend,
-	.freeze_late = i915_pm_suspend_late,
-	.thaw_early = i915_pm_resume_early,
-	.thaw = i915_pm_resume,
+	.freeze = i915_pm_freeze,
+	.freeze_late = i915_pm_freeze_late,
+	.thaw_early = i915_pm_thaw_early,
+	.thaw = i915_pm_thaw,
 	.poweroff = i915_pm_suspend,
 	.poweroff_late = i915_pm_poweroff_late,
-	.restore_early = i915_pm_resume_early,
-	.restore = i915_pm_resume,
+	.restore_early = i915_pm_restore_early,
+	.restore = i915_pm_restore,
 
 	/* S0ix (via runtime suspend) event handlers */
 	.runtime_suspend = intel_runtime_suspend,
...
This diff is collapsed.
This diff is collapsed.
@@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	if (obj == NULL) {
 		int ret;
 
-		obj = i915_gem_alloc_object(pool->dev, size);
-		if (obj == NULL)
-			return ERR_PTR(-ENOMEM);
+		obj = i915_gem_object_create(pool->dev, size);
+		if (IS_ERR(obj))
+			return obj;
 
 		ret = i915_gem_object_get_pages(obj);
 		if (ret)
...
This diff is collapsed.
@@ -154,7 +154,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(to_i915(dev));
 		goto search_again;
 	}
 
@@ -265,7 +265,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests(vm->dev);
+		i915_gem_retire_requests(to_i915(vm->dev));
 
 		WARN_ON(!list_empty(&vm->active_list));
 	}
...
@@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
-	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
 	int retry;
 
 	i915_gem_retire_requests_ring(engine);
@@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(req->engine->dev);
+		i915_gem_chipset_flush(req->engine->i915);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 		return ERR_PTR(-EIO);
 	}
 
-	if (i915.enable_execlists && !ctx->engine[engine->id].state) {
-		int ret = intel_lr_context_deferred_alloc(ctx, engine);
-		if (ret) {
-			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
-			return ERR_PTR(ret);
-		}
-	}
-
 	return ctx;
 }
 
@@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(engine->dev);
+				struct drm_i915_private *dev_priv = engine->i915;
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
 					       &dev_priv->mm.fence_list);
 			}
...
@@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	struct page *page;
 	int i;
 
 	if (obj->bit_17 == NULL)
 		return;
 
 	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
+	for_each_sgt_page(page, sgt_iter, obj->pages) {
 		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj->bit_17) != 0)) {
@@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	struct page *page;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
@@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 	}
 
 	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+
+	for_each_sgt_page(page, sgt_iter, obj->pages) {
+		if (page_to_phys(page) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);
...
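The conversions above all follow the same shape; a minimal sketch of the new iterator in isolation (example_mark_pages_dirty is an illustrative name, assuming obj->pages has already been populated):

	static void example_mark_pages_dirty(struct drm_i915_gem_object *obj)
	{
		struct sgt_iter sgt_iter;
		struct page *page;

		/* Walk every backing page without a struct sg_page_iter. */
		for_each_sgt_page(page, sgt_iter, obj->pages)
			set_page_dirty(page);
	}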
This diff is collapsed.
@@ -34,6 +34,8 @@
 #ifndef __I915_GEM_GTT_H__
 #define __I915_GEM_GTT_H__
 
+#include <linux/io-mapping.h>
+
 struct drm_i915_file_private;
 
 typedef uint32_t gen6_pte_t;
@@ -175,6 +177,7 @@ struct i915_vma {
 	struct drm_mm_node node;
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm;
+	void __iomem *iomap;
 
 	/** Flags and address space this VMA is bound to */
 #define GLOBAL_BIND	(1<<0)
@@ -518,9 +521,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev);
 void i915_gem_init_ggtt(struct drm_device *dev);
 void i915_ggtt_cleanup_hw(struct drm_device *dev);
 
-int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
 int i915_ppgtt_init_hw(struct drm_device *dev);
-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
 void i915_ppgtt_release(struct kref *kref);
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
 					struct drm_i915_file_private *fpriv);
@@ -535,7 +536,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
 	kref_put(&ppgtt->ref, i915_ppgtt_release);
 }
 
-void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 
@@ -560,4 +561,36 @@ size_t
 i915_ggtt_view_size(struct drm_i915_gem_object *obj,
 		    const struct i915_ggtt_view *view);
 
+/**
+ * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
+ * @vma: VMA to iomap
+ *
+ * The passed in VMA has to be pinned in the global GTT mappable region.
+ * An extra pinning of the VMA is acquired for the return iomapping,
+ * the caller must call i915_vma_unpin_iomap to relinquish the pinning
+ * after the iomapping is no longer required.
+ *
+ * Callers must hold the struct_mutex.
+ *
+ * Returns a valid iomapped pointer or ERR_PTR.
+ */
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+
+/**
+ * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
+ * @vma: VMA to unpin
+ *
+ * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
+ *
+ * Callers must hold the struct_mutex. This function is only valid to be
+ * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ */
+static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+	lockdep_assert_held(&vma->vm->dev->struct_mutex);
+	GEM_BUG_ON(vma->pin_count == 0);
+	GEM_BUG_ON(vma->iomap == NULL);
+	vma->pin_count--;
+}
+
 #endif
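A hedged caller sketch for the new pin/unpin pair (example_poke_vma is an illustrative name; it assumes the VMA is already pinned into the mappable GGTT and that struct_mutex is held):

	static int example_poke_vma(struct i915_vma *vma)
	{
		void __iomem *ptr;

		ptr = i915_vma_pin_iomap(vma);	/* takes an extra pin on success */
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);

		writel(0, ptr);			/* touch the first dword via the aperture */

		i915_vma_unpin_iomap(vma);	/* drop the extra pin; mapping stays cached */
		return 0;
	}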
@@ -29,7 +29,7 @@
 #include "intel_renderstate.h"
 
 static const struct intel_renderstate_rodata *
-render_state_get_rodata(struct drm_device *dev, const int gen)
+render_state_get_rodata(const int gen)
 {
 	switch (gen) {
 	case 6:
@@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
 	return NULL;
 }
 
-static int render_state_init(struct render_state *so, struct drm_device *dev)
+static int render_state_init(struct render_state *so,
+			     struct drm_i915_private *dev_priv)
 {
 	int ret;
 
-	so->gen = INTEL_INFO(dev)->gen;
-	so->rodata = render_state_get_rodata(dev, so->gen);
+	so->gen = INTEL_GEN(dev_priv);
+	so->rodata = render_state_get_rodata(so->gen);
 	if (so->rodata == NULL)
 		return 0;
 
 	if (so->rodata->batch_items * 4 > 4096)
 		return -EINVAL;
 
-	so->obj = i915_gem_alloc_object(dev, 4096);
-	if (so->obj == NULL)
-		return -ENOMEM;
+	so->obj = i915_gem_object_create(dev_priv->dev, 4096);
+	if (IS_ERR(so->obj))
+		return PTR_ERR(so->obj);
 
 	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
 	if (ret)
@@ -177,7 +178,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 	if (WARN_ON(engine->id != RCS))
 		return -ENOENT;
 
-	ret = render_state_init(so, engine->dev);
+	ret = render_state_init(so, engine->i915);
 	if (ret)
 		return ret;
...
@@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
        unsigned long count = 0;

        trace_i915_gem_shrink(dev_priv, target, flags);
-       i915_gem_retire_requests(dev_priv->dev);
+       i915_gem_retire_requests(dev_priv);
+
+       /*
+        * Unbinding of objects will require HW access; Let us not wake the
+        * device just to recover a little memory. If absolutely necessary,
+        * we will force the wake during oom-notifier.
+        */
+       if ((flags & I915_SHRINK_BOUND) &&
+           !intel_runtime_pm_get_if_in_use(dev_priv))
+               flags &= ~I915_SHRINK_BOUND;

        /*
         * As we may completely rewrite the (un)bound list whilst unbinding
@@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                list_splice(&still_in_list, phase->list);
        }

-       i915_gem_retire_requests(dev_priv->dev);
+       if (flags & I915_SHRINK_BOUND)
+               intel_runtime_pm_put(dev_priv);
+
+       i915_gem_retire_requests(dev_priv);

        return count;
}
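
The two hunks above implement one pattern worth calling out: the shrinker takes a runtime-PM reference only when the device is already awake, and silently drops the bound-object work otherwise. A standalone sketch of that pattern, with hypothetical names and the reclaim loop elided:

/* Sketch of the opportunistic-wakeref pattern used by the shrinker. */
static unsigned long reclaim_some(struct drm_i915_private *dev_priv,
                                  unsigned int flags)
{
        unsigned long count = 0;

        /* unbinding bound objects needs HW access; skip that work if it
         * would mean waking a runtime-suspended device */
        if ((flags & I915_SHRINK_BOUND) &&
            !intel_runtime_pm_get_if_in_use(dev_priv))
                flags &= ~I915_SHRINK_BOUND;

        /* ... reclaim according to the remaining flags ... */

        if (flags & I915_SHRINK_BOUND)
                intel_runtime_pm_put(dev_priv);         /* balance the wakeref */

        return count;
}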
@@ -345,7 +357,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
                return NOTIFY_DONE;

+       intel_runtime_pm_get(dev_priv);
        freed_pages = i915_gem_shrink_all(dev_priv);
+       intel_runtime_pm_put(dev_priv);

        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
@@ -386,17 +400,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
        struct drm_i915_private *dev_priv =
                container_of(nb, struct drm_i915_private, mm.vmap_notifier);
        struct shrinker_lock_uninterruptible slu;
-       unsigned long freed_pages;
+       struct i915_vma *vma, *next;
+       unsigned long freed_pages = 0;
+       int ret;

        if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
                return NOTIFY_DONE;

-       freed_pages = i915_gem_shrink(dev_priv, -1UL,
-                                     I915_SHRINK_BOUND |
-                                     I915_SHRINK_UNBOUND |
-                                     I915_SHRINK_ACTIVE |
-                                     I915_SHRINK_VMAPS);
+       /* Force everything onto the inactive lists */
+       ret = i915_gpu_idle(dev_priv->dev);
+       if (ret)
+               goto out;
+
+       intel_runtime_pm_get(dev_priv);
+       freed_pages += i915_gem_shrink(dev_priv, -1UL,
+                                      I915_SHRINK_BOUND |
+                                      I915_SHRINK_UNBOUND |
+                                      I915_SHRINK_ACTIVE |
+                                      I915_SHRINK_VMAPS);
+       intel_runtime_pm_put(dev_priv);
+
+       /* We also want to clear any cached iomaps as they wrap vmap */
+       list_for_each_entry_safe(vma, next,
+                                &dev_priv->ggtt.base.inactive_list, vm_link) {
+               unsigned long count = vma->node.size >> PAGE_SHIFT;
+               if (vma->iomap && i915_vma_unbind(vma) == 0)
+                       freed_pages += count;
+       }
+
+out:
        i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

        *(unsigned long *)ptr += freed_pages;
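
For context on where this callback comes from: the notifier embedded in dev_priv->mm is registered on the core vmap purge notifier chain during driver init. A minimal sketch of that wiring, assuming the register_vmap_purge_notifier() API this callback is written against (the init placement shown is illustrative, not quoted from the patch):

/* Illustrative wiring of the vmap-purge notifier. */
static int example_register_vmap_notifier(struct drm_i915_private *dev_priv)
{
        dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
        return register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier);
}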
...
@@ -56,7 +56,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
        /* See the comment at the drm_mm_init() call for more about this check.
         * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
-       if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
+       if (IS_GEN8(dev_priv) && start < 4096)
                start = 4096;

        mutex_lock(&dev_priv->mm.stolen_lock);
@@ -109,9 +109,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen >= 3) {
                u32 bsm;

-               pci_read_config_dword(dev->pdev, BSM, &bsm);
-               base = bsm & BSM_MASK;
+               pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
+               base = bsm & INTEL_BSM_MASK;
        } else if (IS_I865G(dev)) {
                u16 toud = 0;
...
@@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
        if (INTEL_INFO(obj->base.dev)->gen >= 4)
                return true;

-       if (INTEL_INFO(obj->base.dev)->gen == 3) {
+       if (IS_GEN3(obj->base.dev)) {
                if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
                        return false;
        } else {
@@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 */
                if (obj->map_and_fenceable &&
                    !i915_gem_object_fence_ok(obj, args->tiling_mode))
-                       ret = i915_gem_object_ggtt_unbind(obj);
+                       ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));

                if (ret == 0) {
                        if (obj->pages &&
...
@@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
+       struct page *page;

        BUG_ON(obj->userptr.work != NULL);
        __i915_gem_userptr_set_active(obj, false);
@@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
        i915_gem_gtt_finish_object(obj);

-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct page *page = sg_page_iter_page(&sg_iter);
-
+       for_each_sgt_page(page, sgt_iter, obj->pages) {
                if (obj->dirty)
                        set_page_dirty(page);
@@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
        return 0;
}

-int
-i915_gem_init_userptr(struct drm_device *dev)
+void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
-       struct drm_i915_private *dev_priv = to_i915(dev);
        mutex_init(&dev_priv->mm_lock);
        hash_init(dev_priv->mm_structs);
-       return 0;
}
@@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
        }

-       if (INTEL_INFO(dev)->gen == 7)
+       if (IS_GEN7(dev))
                err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

        for (i = 0; i < ARRAY_SIZE(error->ring); i++)
@@ -824,19 +824,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
        return error_code;
}

-static void i915_gem_record_fences(struct drm_device *dev,
+static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error)
{
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

-       if (IS_GEN3(dev) || IS_GEN2(dev)) {
+       if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
                for (i = 0; i < dev_priv->num_fence_regs; i++)
                        error->fence[i] = I915_READ(FENCE_REG(i));
-       } else if (IS_GEN5(dev) || IS_GEN4(dev)) {
+       } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
                for (i = 0; i < dev_priv->num_fence_regs; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
-       } else if (INTEL_INFO(dev)->gen >= 6) {
+       } else if (INTEL_GEN(dev_priv) >= 6) {
                for (i = 0; i < dev_priv->num_fence_regs; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
        }
@@ -851,7 +850,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
        struct intel_engine_cs *to;
        enum intel_engine_id id;

-       if (!i915_semaphore_is_enabled(dev_priv->dev))
+       if (!i915_semaphore_is_enabled(dev_priv))
                return;

        if (!error->semaphore_obj)
@@ -893,31 +892,29 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
        }
}

-static void i915_record_ring_state(struct drm_device *dev,
+static void i915_record_ring_state(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error,
                                   struct intel_engine_cs *engine,
                                   struct drm_i915_error_ring *ering)
{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (INTEL_INFO(dev)->gen >= 6) {
+       if (INTEL_GEN(dev_priv) >= 6) {
                ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
                ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
-               if (INTEL_INFO(dev)->gen >= 8)
+               if (INTEL_GEN(dev_priv) >= 8)
                        gen8_record_semaphore_state(dev_priv, error, engine,
                                                    ering);
                else
                        gen6_record_semaphore_state(dev_priv, engine, ering);
        }

-       if (INTEL_INFO(dev)->gen >= 4) {
+       if (INTEL_GEN(dev_priv) >= 4) {
                ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
                ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
                ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
                ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
                ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
                ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
-               if (INTEL_INFO(dev)->gen >= 8) {
+               if (INTEL_GEN(dev_priv) >= 8) {
                        ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
                        ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
                }
@@ -939,10 +936,10 @@ static void i915_record_ring_state(struct drm_device *dev,
        ering->tail = I915_READ_TAIL(engine);
        ering->ctl = I915_READ_CTL(engine);

-       if (I915_NEED_GFX_HWS(dev)) {
+       if (I915_NEED_GFX_HWS(dev_priv)) {
                i915_reg_t mmio;

-               if (IS_GEN7(dev)) {
+               if (IS_GEN7(dev_priv)) {
                        switch (engine->id) {
                        default:
                        case RCS:
@@ -958,7 +955,7 @@ static void i915_record_ring_state(struct drm_device *dev,
                                mmio = VEBOX_HWS_PGA_GEN7;
                                break;
                        }
-               } else if (IS_GEN6(engine->dev)) {
+               } else if (IS_GEN6(engine->i915)) {
                        mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
                } else {
                        /* XXX: gen8 returns to sanity */
@@ -971,18 +968,18 @@ static void i915_record_ring_state(struct drm_device *dev,
        ering->hangcheck_score = engine->hangcheck.score;
        ering->hangcheck_action = engine->hangcheck.action;

-       if (USES_PPGTT(dev)) {
+       if (USES_PPGTT(dev_priv)) {
                int i;

                ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

-               if (IS_GEN6(dev))
+               if (IS_GEN6(dev_priv))
                        ering->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE_READ(engine));
-               else if (IS_GEN7(dev))
+               else if (IS_GEN7(dev_priv))
                        ering->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE(engine));
-               else if (INTEL_INFO(dev)->gen >= 8)
+               else if (INTEL_GEN(dev_priv) >= 8)
                        for (i = 0; i < 4; i++) {
                                ering->vm_info.pdp[i] =
                                        I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -998,7 +995,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
                                           struct drm_i915_error_state *error,
                                           struct drm_i915_error_ring *ering)
{
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        struct drm_i915_gem_object *obj;

        /* Currently render ring is the only HW context user */
@@ -1016,10 +1013,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
                }
        }

-static void i915_gem_record_rings(struct drm_device *dev,
+static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
                                  struct drm_i915_error_state *error)
{
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_request *request;
        int i, count;
@@ -1030,12 +1026,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
                error->ring[i].pid = -1;

-               if (engine->dev == NULL)
+               if (!intel_engine_initialized(engine))
                        continue;

                error->ring[i].valid = true;

-               i915_record_ring_state(dev, error, engine, &error->ring[i]);
+               i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);

                request = i915_gem_find_active_request(engine);
                if (request) {
@@ -1301,15 +1297,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);

-       i915_get_extra_instdone(dev, error->extra_instdone);
+       i915_get_extra_instdone(dev_priv, error->extra_instdone);
}

-static void i915_error_capture_msg(struct drm_device *dev,
+static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error,
                                   u32 engine_mask,
                                   const char *error_msg)
{
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 ecode;
        int ring_id = -1, len;
@@ -1317,7 +1312,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
        len = scnprintf(error->error_msg, sizeof(error->error_msg),
                        "GPU HANG: ecode %d:%d:0x%08x",
-                       INTEL_INFO(dev)->gen, ring_id, ecode);
+                       INTEL_GEN(dev_priv), ring_id, ecode);

        if (ring_id != -1 && error->ring[ring_id].pid != -1)
                len += scnprintf(error->error_msg + len,
@@ -1352,11 +1347,11 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+                             u32 engine_mask,
                              const char *error_msg)
{
        static bool warned;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
@@ -1372,15 +1367,15 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
        i915_capture_gen_state(dev_priv, error);
        i915_capture_reg_state(dev_priv, error);
        i915_gem_capture_buffers(dev_priv, error);
-       i915_gem_record_fences(dev, error);
-       i915_gem_record_rings(dev, error);
+       i915_gem_record_fences(dev_priv, error);
+       i915_gem_record_rings(dev_priv, error);

        do_gettimeofday(&error->time);

-       error->overlay = intel_overlay_capture_error_state(dev);
-       error->display = intel_display_capture_error_state(dev);
+       error->overlay = intel_overlay_capture_error_state(dev_priv);
+       error->display = intel_display_capture_error_state(dev_priv);

-       i915_error_capture_msg(dev, error, engine_mask, error_msg);
+       i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
        DRM_INFO("%s\n", error->error_msg);

        spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
@@ -1400,7 +1395,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
                DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
                DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
                DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
-               DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+               DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
                warned = true;
        }
}
@@ -1450,17 +1445,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}

/* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
+                            uint32_t *instdone)
{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

-       if (IS_GEN2(dev) || IS_GEN3(dev))
+       if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
                instdone[0] = I915_READ(GEN2_INSTDONE);
-       else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+       else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
                instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
                instdone[1] = I915_READ(GEN4_INSTDONE1);
-       } else if (INTEL_INFO(dev)->gen >= 7) {
+       } else if (INTEL_GEN(dev_priv) >= 7) {
                instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
                instdone[1] = I915_READ(GEN7_SC_INSTDONE);
                instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
...
@@ -67,11 +67,11 @@
#define   GUC_WOPCM_OFFSET_VALUE        0x80000         /* 512KB */
#define GUC_MAX_IDLE_COUNT              _MMIO(0xC3E4)

+/* Defines WOPCM space available to GuC firmware */
#define GUC_WOPCM_SIZE                  _MMIO(0xc050)
-#define   GUC_WOPCM_SIZE_VALUE          (0x80 << 12)    /* 512KB */
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
-#define GUC_WOPCM_TOP                   (GUC_WOPCM_SIZE_VALUE)
+#define GUC_WOPCM_TOP                   (0x80 << 12)    /* 512KB */
+#define BXT_GUC_WOPCM_RC6_RESERVED      (0x10 << 12)    /* 64KB */

#define GEN8_GT_PM_CONFIG               _MMIO(0x138140)
#define GEN9LP_GT_PM_CONFIG             _MMIO(0x138140)
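
The new BXT define above reflects that on Broxton the top 64KB of WOPCM is reserved for RC6; a plausible sketch of how a firmware loader would combine the two values when programming GUC_WOPCM_SIZE (placement and surrounding code are illustrative, not quoted from the patch):

/* Illustrative only: carve the RC6-reserved 64KB out of WOPCM on BXT. */
u32 wopcm_size = GUC_WOPCM_TOP;

if (IS_BROXTON(dev_priv))
        wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
I915_WRITE(GUC_WOPCM_SIZE, wopcm_size);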
...
@@ -158,8 +158,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
        data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
        /* WaRsDisableCoarsePowerGating:skl,bxt */
-       if (!intel_enable_rc6(dev) ||
-           NEEDS_WaRsDisableCoarsePowerGating(dev))
+       if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev))
                data[1] = 0;
        else
                /* bit 0 and 1 are for Render and Media domain separately */
@@ -587,8 +586,8 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;

-       obj = i915_gem_alloc_object(dev, size);
-       if (!obj)
+       obj = i915_gem_object_create(dev, size);
+       if (IS_ERR(obj))
                return NULL;

        if (i915_gem_object_get_pages(obj)) {
...
@@ -58,6 +58,7 @@ struct i915_params i915 __read_mostly = {
        .guc_log_level = -1,
        .enable_dp_mst = true,
        .inject_load_failure = 0,
+       .enable_dpcd_backlight = false,
};

module_param_named(modeset, i915.modeset, int, 0400);
@@ -210,3 +211,6 @@ MODULE_PARM_DESC(enable_dp_mst,
module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
MODULE_PARM_DESC(inject_load_failure,
        "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
+module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
+MODULE_PARM_DESC(enable_dpcd_backlight,
+       "Enable support for DPCD backlight control (default:false)");
@@ -61,6 +61,7 @@ struct i915_params {
        bool verbose_state_checks;
        bool nuclear_pageflip;
        bool enable_dp_mst;
+       bool enable_dpcd_backlight;
};

extern struct i915_params i915 __read_mostly;
...
@@ -2449,6 +2449,8 @@ enum skl_disp_power_wells {
#define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK       0x0000003f
#define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT      0

+#define RAWCLK_FREQ_VLV                 _MMIO(VLV_DISPLAY_BASE + 0x6024)
+
#define _FPA0   0x6040
#define _FPA1   0x6044
#define _FPB0   0x6048
@@ -6031,6 +6033,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PAR1_1                  _MMIO(0x42080)
#define  DPA_MASK_VBLANK_SRD            (1 << 15)
#define  FORCE_ARB_IDLE_PLANES          (1 << 14)
+#define  SKL_EDP_PSR_FIX_RDWRAP         (1 << 3)

#define _CHICKEN_PIPESL_1_A             0x420b0
#define _CHICKEN_PIPESL_1_B             0x420b4
@@ -6089,7 +6092,14 @@ enum skl_disp_power_wells {
#define   VLV_B0_WA_L3SQCREG1_VALUE     0x00D30000

#define GEN8_L3SQCREG1                  _MMIO(0xB100)
-#define   BDW_WA_L3SQCREG1_DEFAULT      0x784000
+/*
+ * Note that on CHV the following has an off-by-one error wrt. BSpec.
+ * Using the formula in BSpec leads to a hang, while the formula here works
+ * fine and matches the formulas for all other platforms. A BSpec change
+ * request has been filed to clarify this.
+ */
+#define   L3_GENERAL_PRIO_CREDITS(x)    (((x) >> 1) << 19)
+#define   L3_HIGH_PRIO_CREDITS(x)       (((x) >> 1) << 14)
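
As a worked example of the new credit macros: each one encodes half of the requested credit count into its register field, so 30 general-priority plus 2 high-priority credits come out as (15 << 19) | (1 << 14) = 0x784000, exactly the value the removed BDW_WA_L3SQCREG1_DEFAULT hard-coded. A write using the macros would therefore look like this (illustrative register write, not quoted from the patch):

/* Illustrative: 30 general + 2 high priority L3 SQC credits. */
I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(30) |
                           L3_HIGH_PRIO_CREDITS(2));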
#define GEN7_L3CNTLREG1                 _MMIO(0xB01C)
#define  GEN7_WA_FOR_GEN7_L3_CONTROL    0x3C47FF8C
@@ -7557,14 +7567,15 @@ enum skl_disp_power_wells {
#define  CDCLK_FREQ_540                 (1<<26)
#define  CDCLK_FREQ_337_308             (2<<26)
#define  CDCLK_FREQ_675_617             (3<<26)
-#define  CDCLK_FREQ_DECIMAL_MASK        (0x7ff)
#define  BXT_CDCLK_CD2X_DIV_SEL_MASK    (3<<22)
#define  BXT_CDCLK_CD2X_DIV_SEL_1       (0<<22)
#define  BXT_CDCLK_CD2X_DIV_SEL_1_5     (1<<22)
#define  BXT_CDCLK_CD2X_DIV_SEL_2       (2<<22)
#define  BXT_CDCLK_CD2X_DIV_SEL_4       (3<<22)
+#define  BXT_CDCLK_CD2X_PIPE(pipe)      ((pipe)<<20)
+#define  BXT_CDCLK_CD2X_PIPE_NONE       BXT_CDCLK_CD2X_PIPE(3)
#define  BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
+#define  CDCLK_FREQ_DECIMAL_MASK        (0x7ff)

/* LCPLL_CTL */
#define LCPLL1_CTL              _MMIO(0x46010)
...
@@ -43,7 +43,7 @@ static u32 calc_residency(struct drm_device *dev,
        u64 units = 128ULL, div = 100000ULL;
        u32 ret;

-       if (!intel_enable_rc6(dev))
+       if (!intel_enable_rc6())
                return 0;

        intel_runtime_pm_get(dev_priv);
@@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev,
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
-       struct drm_minor *dminor = dev_to_drm_minor(kdev);
-       return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
+       return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
}

static ssize_t
@@ -397,7 +396,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
        /* We still need *_set_rps to process the new max_delay and
         * update the interrupt limits and PMINTRMSK even though
         * frequency request may be unchanged. */
-       intel_set_rps(dev, val);
+       intel_set_rps(dev_priv, val);

        mutex_unlock(&dev_priv->rps.hw_lock);
@@ -461,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
        /* We still need *_set_rps to process the new min_delay and
         * update the interrupt limits and PMINTRMSK even though
         * frequency request may be unchanged. */
-       intel_set_rps(dev, val);
+       intel_set_rps(dev_priv, val);

        mutex_unlock(&dev_priv->rps.hw_lock);
...
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
            ),

            TP_fast_assign(
-                          __entry->dev = from->dev->primary->index;
+                          __entry->dev = from->i915->dev->primary->index;
                           __entry->sync_from = from->id;
                           __entry->sync_to = to_req->engine->id;
                           __entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
            ),

            TP_fast_assign(
-                          struct intel_engine_cs *engine =
-                                               i915_gem_request_get_engine(req);
-                          __entry->dev = engine->dev->primary->index;
-                          __entry->ring = engine->id;
-                          __entry->seqno = i915_gem_request_get_seqno(req);
+                          __entry->dev = req->i915->dev->primary->index;
+                          __entry->ring = req->engine->id;
+                          __entry->seqno = req->seqno;
                           __entry->flags = flags;
-                          i915_trace_irq_get(engine, req);
+                          i915_trace_irq_get(req->engine, req);
                           ),

            TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
            ),

            TP_fast_assign(
-                          __entry->dev = req->engine->dev->primary->index;
+                          __entry->dev = req->i915->dev->primary->index;
                           __entry->ring = req->engine->id;
                           __entry->invalidate = invalidate;
                           __entry->flush = flush;
@@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
            ),

            TP_fast_assign(
-                          struct intel_engine_cs *engine =
-                                               i915_gem_request_get_engine(req);
-                          __entry->dev = engine->dev->primary->index;
-                          __entry->ring = engine->id;
-                          __entry->seqno = i915_gem_request_get_seqno(req);
+                          __entry->dev = req->i915->dev->primary->index;
+                          __entry->ring = req->engine->id;
+                          __entry->seqno = req->seqno;
                           ),

            TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -560,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify,
            ),

            TP_fast_assign(
-                          __entry->dev = engine->dev->primary->index;
+                          __entry->dev = engine->i915->dev->primary->index;
                           __entry->ring = engine->id;
                           __entry->seqno = engine->get_seqno(engine);
                           ),
@@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
             * less desirable.
             */
            TP_fast_assign(
-                          struct intel_engine_cs *engine =
-                                               i915_gem_request_get_engine(req);
-                          __entry->dev = engine->dev->primary->index;
-                          __entry->ring = engine->id;
-                          __entry->seqno = i915_gem_request_get_seqno(req);
+                          __entry->dev = req->i915->dev->primary->index;
+                          __entry->ring = req->engine->id;
+                          __entry->seqno = req->seqno;
                           __entry->blocking =
-                                    mutex_is_locked(&engine->dev->struct_mutex);
+                                    mutex_is_locked(&req->i915->dev->struct_mutex);
                           ),

            TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm,
                        __entry->ring = engine->id;
                        __entry->to = to;
                        __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
-                       __entry->dev = engine->dev->primary->index;
+                       __entry->dev = engine->i915->dev->primary->index;
                        ),

            TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
...
@@ -58,15 +58,14 @@
 * This function is called at the initialization stage, to detect whether
 * running on a vGPU.
 */
-void i915_check_vgpu(struct drm_device *dev)
+void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
-       struct drm_i915_private *dev_priv = to_i915(dev);
        uint64_t magic;
        uint32_t version;

        BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);

-       if (!IS_HASWELL(dev))
+       if (!IS_HASWELL(dev_priv))
                return;

        magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
@@ -136,7 +135,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
/**
 * intel_vgt_balloon - balloon out reserved graphics address trunks
- * @dev: drm device
+ * @dev_priv: i915 device
 *
 * This function is called at the initialization stage, to balloon out the
 * graphic address space allocated to other vGPUs, by marking these spaces as
...
@@ -110,7 +110,7 @@ struct vgt_if {
#define VGT_DRV_DISPLAY_NOT_READY 0
#define VGT_DRV_DISPLAY_READY     1 /* ready for display switch */

-extern void i915_check_vgpu(struct drm_device *dev);
+extern void i915_check_vgpu(struct drm_i915_private *dev_priv);
extern int intel_vgt_balloon(struct drm_device *dev);
extern void intel_vgt_deballoon(void);
...
@@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
static int i915_audio_component_get_cdclk_freq(struct device *dev)
{
        struct drm_i915_private *dev_priv = dev_to_i915(dev);
-       int ret;

        if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
                return -ENODEV;

-       intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-       ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
-       intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-
-       return ret;
+       return dev_priv->cdclk_freq;
}

static int i915_audio_component_sync_audio_rate(struct device *dev,
...
@@ -318,6 +318,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
                return;
        }

+       dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+       if (bdb->version >= 191 &&
+           get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
+               const struct bdb_lfp_backlight_control_method *method;
+
+               method = &backlight_data->backlight_control[panel_type];
+               dev_priv->vbt.backlight.type = method->type;
+       }
+
        dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
        dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
        dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
@@ -763,6 +772,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
                return;
        }

+       /*
+        * These fields are present only from VBT version 197 onwards,
+        * so make sure they are zeroed out for earlier versions.
+        */
+       if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
+               dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
+               dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
+       }
+
        /* We have mandatory mipi config blocks. Initialize as generic panel */
        dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
...
@@ -30,6 +30,14 @@
#ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_

+enum intel_backlight_type {
+       INTEL_BACKLIGHT_PMIC,
+       INTEL_BACKLIGHT_LPSS,
+       INTEL_BACKLIGHT_DISPLAY_DDI,
+       INTEL_BACKLIGHT_DSI_DCS,
+       INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
+};
+
struct edp_power_seq {
        u16 t1_t3;
        u16 t8;
@@ -113,7 +121,13 @@ struct mipi_config {
        u16 dual_link:2;
        u16 lane_cnt:2;
        u16 pixel_overlap:3;
-       u16 rsvd3:9;
+       u16 rgb_flip:1;
+#define DL_DCS_PORT_A           0x00
+#define DL_DCS_PORT_C           0x01
+#define DL_DCS_PORT_A_AND_C     0x02
+       u16 dl_dcs_cabc_ports:2;
+       u16 dl_dcs_backlight_ports:2;
+       u16 rsvd3:4;

        u16 rsvd4;
...
@@ -41,16 +41,22 @@
 * be moved to FW_FAILED.
 */

+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_VERSION_REQUIRED        CSR_VERSION(1, 1)
+
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_VERSION_REQUIRED        CSR_VERSION(1, 23)
+
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_VERSION_REQUIRED        CSR_VERSION(1, 7)

#define FIRMWARE_URL  "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"

-MODULE_FIRMWARE(I915_CSR_SKL);
-MODULE_FIRMWARE(I915_CSR_BXT);
-
-#define SKL_CSR_VERSION_REQUIRED        CSR_VERSION(1, 23)
-#define BXT_CSR_VERSION_REQUIRED        CSR_VERSION(1, 7)
-
#define CSR_MAX_FW_SIZE                 0x2FFF
#define CSR_DEFAULT_FW_OFFSET           0xFFFFFFFF
@@ -169,12 +175,10 @@ struct stepping_info {
        char substepping;
};

-/*
- * Kabylake derivated from Skylake H0, so SKL H0
- * is the right firmware for KBL A0 (revid 0).
- */
static const struct stepping_info kbl_stepping_info[] = {
-       {'H', '0'}, {'I', '0'}
+       {'A', '0'}, {'B', '0'}, {'C', '0'},
+       {'D', '0'}, {'E', '0'}, {'F', '0'},
+       {'G', '0'}, {'H', '0'}, {'I', '0'},
};

static const struct stepping_info skl_stepping_info[] = {
@@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
        csr->version = css_header->version;

-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_KABYLAKE(dev_priv)) {
+               required_min_version = KBL_CSR_VERSION_REQUIRED;
+       } else if (IS_SKYLAKE(dev_priv)) {
                required_min_version = SKL_CSR_VERSION_REQUIRED;
        } else if (IS_BROXTON(dev_priv)) {
                required_min_version = BXT_CSR_VERSION_REQUIRED;
@@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
        if (!HAS_CSR(dev_priv))
                return;

-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+       if (IS_KABYLAKE(dev_priv))
+               csr->fw_path = I915_CSR_KBL;
+       else if (IS_SKYLAKE(dev_priv))
                csr->fw_path = I915_CSR_SKL;
        else if (IS_BROXTON(dev_priv))
                csr->fw_path = I915_CSR_BXT;
...
@@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
{
        struct intel_shared_dpll *pll;
        struct intel_dpll_hw_state *state;
-       intel_clock_t clock;
+       struct dpll clock;

        /* For DDI ports we always use a shared PLL. */
        if (WARN_ON(dpll == DPLL_ID_PRIVATE))
...
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "intel_drv.h"
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
uint8_t reg_val = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
&reg_val) < 0) {
DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
DP_EDP_DISPLAY_CONTROL_REGISTER);
return;
}
if (enable)
reg_val |= DP_EDP_BACKLIGHT_ENABLE;
else
reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
reg_val) != 1) {
DRM_DEBUG_KMS("Failed to %s aux backlight\n",
enable ? "enable" : "disable");
}
}
/*
 * Read the current backlight value from the DPCD register(s), depending on
 * whether 8-bit (MSB only) or 16-bit (MSB and LSB) values are supported.
*/
static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t read_val[2] = { 0x0 };
uint16_t level = 0;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
&read_val, sizeof(read_val)) < 0) {
DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
return 0;
}
level = read_val[0];
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
level = (read_val[0] << 8 | read_val[1]);
return level;
}
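
A quick arithmetic check of the reassembly above: with the byte-count capability set, an MSB of 0x01 and an LSB of 0x23 yield (0x01 << 8) | 0x23 = 0x0123; without it, the panel implements only the MSB register and the returned level is simply read_val[0].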
/*
 * Send the current backlight level over the AUX channel, checking whether
 * the panel uses an 8-bit or a 16-bit (MSB and LSB) value.
*/
static void
intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t vals[2] = { 0x0 };
vals[0] = level;
/* Write the MSB and/or LSB */
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
vals[0] = (level & 0xFF00) >> 8;
vals[1] = (level & 0xFF);
}
if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
vals, sizeof(vals)) < 0) {
DRM_DEBUG_KMS("Failed to write aux backlight level\n");
return;
}
}
static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t dpcd_buf = 0;
set_aux_backlight_enable(intel_dp, true);
if ((drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) &&
((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET))
drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
(dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD));
}
static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
{
set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false);
}
static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
struct intel_panel *panel = &connector->panel;
intel_dp_aux_enable_backlight(connector);
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
panel->backlight.max = 0xFFFF;
else
panel->backlight.max = 0xFF;
panel->backlight.min = 0;
panel->backlight.level = intel_dp_aux_get_backlight(connector);
panel->backlight.enabled = panel->backlight.level != 0;
return 0;
}
static bool
intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
*/
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
!((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) ||
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) {
DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
return true;
}
return false;
}
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
if (!i915.enable_dpcd_backlight)
return -ENODEV;
if (!intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV;
panel->backlight.setup = intel_dp_aux_setup_backlight;
panel->backlight.enable = intel_dp_aux_enable_backlight;
panel->backlight.disable = intel_dp_aux_disable_backlight;
panel->backlight.set = intel_dp_aux_set_backlight;
panel->backlight.get = intel_dp_aux_get_backlight;
return 0;
}
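
A note on how these hooks are meant to be consumed: the function returns -ENODEV unless both the module parameter and the panel's DPCD capabilities allow AUX control, so a caller can try it first and fall back to the platform's PWM backlight. A hypothetical sketch of that pattern (the fallback helper name is made up; the real wiring lives in the panel setup code):

/* Hypothetical caller: prefer DPCD backlight, else fall back to PWM. */
static void example_init_backlight(struct intel_connector *connector)
{
        if (intel_dp_aux_init_backlight_funcs(connector) == 0)
                return;         /* AUX/DPCD backlight hooks installed */

        example_init_pwm_backlight_funcs(connector);    /* hypothetical */
}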
@@ -1508,7 +1508,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        int clock = crtc_state->port_clock;

        if (encoder->type == INTEL_OUTPUT_HDMI) {
-               intel_clock_t best_clock;
+               struct dpll best_clock;

                /* Calculate HDMI div */
                /*
...