Commit b039d6d0 authored by Dave Airlie

Merge tag 'drm-intel-next-2016-01-24' of git://anongit.freedesktop.org/drm-intel into drm-next

- support for v3 vbt dsi blocks (Jani)
- improve mmio debug checks (Mika Kuoppala)
- reorg the ddi port translation table entries and related code (Ville)
- reorg gen8 interrupt handling for future platforms (Tvrtko)
- refactor tile width/height computations for framebuffers (Ville)
- kerneldoc integration for intel_pm.c (Jani)
- move default context from engines to device-global dev_priv (Dave Gordon)
- make seqno/irq ordering coherent with execlist (Chris)
- decouple internal engine number from UABI (Chris&Tvrtko)
- tons of small fixes all over, as usual

* tag 'drm-intel-next-2016-01-24' of git://anongit.freedesktop.org/drm-intel: (148 commits)
  drm/i915: Update DRIVER_DATE to 20160124
  drm/i915: Seal busy-ioctl uABI and prevent leaking of internal ids
  drm/i915: Decouple execbuf uAPI from internal implementation
  drm/i915: Use ordered seqno write interrupt generation on gen8+ execlists
  drm/i915: Limit the auto arming of mmio debugs on vlv/chv
  drm/i915: Tune down "GT register while GT waking disabled" message
  drm/i915: tidy up a few leftovers
  drm/i915: abolish separate per-ring default_context pointers
  drm/i915: simplify allocation of driver-internal requests
  drm/i915: Fix NULL plane->fb oops on SKL
  drm/i915: Do not put big intel_crtc_state on the stack
  Revert "drm/i915: Add two-stage ILK-style watermark programming (v10)"
  drm/i915: add DOC: headline to RC6 kernel-doc
  drm/i915: turn some bogus kernel-doc comments to normal comments
  drm/i915/sdvo: revert bogus kernel-doc comments to normal comments
  drm/i915/gen9: Correct max save/restore register count during gpu reset with GuC
  drm/i915: Demote user facing DMC firmware load failure message
  drm/i915: use hlist_for_each_entry
  drm/i915: skl_update_scaler() wants a rotation bitmask instead of bit number
  drm/i915: Don't reject primary plane windowing with color keying enabled on SKL+
  ...
parents 388f7b1d 947eaebc
......@@ -3319,6 +3319,12 @@ int num_ioctls;</synopsis>
!Pdrivers/gpu/drm/i915/intel_csr.c csr support for dmc
!Idrivers/gpu/drm/i915/intel_csr.c
</sect2>
<sect2>
<title>Video BIOS Table (VBT)</title>
!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
!Idrivers/gpu/drm/i915/intel_bios.c
!Idrivers/gpu/drm/i915/intel_bios.h
</sect2>
</sect1>
<sect1>
......
......@@ -1331,7 +1331,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
struct intel_engine_cs *ring;
u64 acthd[I915_NUM_RINGS];
u32 seqno[I915_NUM_RINGS];
int i;
u32 instdone[I915_NUM_INSTDONE_REG];
int i, j;
if (!i915.enable_hangcheck) {
seq_printf(m, "Hangcheck disabled\n");
......@@ -1345,6 +1346,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
acthd[i] = intel_ring_get_active_head(ring);
}
i915_get_extra_instdone(dev, instdone);
intel_runtime_pm_put(dev_priv);
if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
......@@ -1365,6 +1368,21 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
(long long)ring->hangcheck.max_acthd);
seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
if (ring->id == RCS) {
seq_puts(m, "\tinstdone read =");
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
seq_printf(m, " 0x%08x", instdone[j]);
seq_puts(m, "\n\tinstdone accu =");
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
seq_printf(m, " 0x%08x",
ring->hangcheck.instdone[j]);
seq_puts(m, "\n");
}
}
return 0;
......@@ -1942,11 +1960,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_puts(m, "HW context ");
describe_ctx(m, ctx);
for_each_ring(ring, dev_priv, i) {
if (ring->default_context == ctx)
seq_printf(m, "(default context %s) ",
ring->name);
}
if (ctx == dev_priv->kernel_context)
seq_printf(m, "(kernel context) ");
if (i915.enable_execlists) {
seq_putc(m, '\n');
......@@ -1976,12 +1991,13 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
static void i915_dump_lrc_obj(struct seq_file *m,
struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj)
struct intel_context *ctx,
struct intel_engine_cs *ring)
{
struct page *page;
uint32_t *reg_state;
int j;
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
unsigned long ggtt_offset = 0;
if (ctx_obj == NULL) {
......@@ -1991,7 +2007,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
}
seq_printf(m, "CONTEXT: %s %u\n", ring->name,
intel_execlists_ctx_id(ctx_obj));
intel_execlists_ctx_id(ctx, ring));
if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n");
......@@ -2037,13 +2053,10 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
if (ret)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link) {
for_each_ring(ring, dev_priv, i) {
if (ring->default_context != ctx)
i915_dump_lrc_obj(m, ring,
ctx->engine[i].state);
}
}
list_for_each_entry(ctx, &dev_priv->context_list, link)
if (ctx != dev_priv->kernel_context)
for_each_ring(ring, dev_priv, i)
i915_dump_lrc_obj(m, ctx, ring);
mutex_unlock(&dev->struct_mutex);
......@@ -2092,13 +2105,13 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
read_pointer = ring->next_context_status_buffer;
write_pointer = status_pointer & 0x07;
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += 6;
write_pointer += GEN8_CSB_ENTRIES;
seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
read_pointer, write_pointer);
for (i = 0; i < 6; i++) {
for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
......@@ -2115,11 +2128,8 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
struct drm_i915_gem_object *ctx_obj;
ctx_obj = head_req->ctx->engine[ring_id].state;
seq_printf(m, "\tHead request id: %u\n",
intel_execlists_ctx_id(ctx_obj));
intel_execlists_ctx_id(head_req->ctx, ring));
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
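The read/write pointer handling above treats the context status buffer (CSB) as a small ring of GEN8_CSB_ENTRIES slots (six on these parts): when the write pointer has wrapped behind the read pointer, it is bumped by one full ring so the difference is the number of pending entries. A minimal standalone sketch of that arithmetic (plain C, illustration only, not driver code):

#include <stdio.h>

#define GEN8_CSB_ENTRIES 6

static unsigned int csb_pending(unsigned int read_ptr, unsigned int write_ptr)
{
	/* Same adjustment as in i915_execlists() above. */
	if (read_ptr > write_ptr)
		write_ptr += GEN8_CSB_ENTRIES;
	return write_ptr - read_ptr;
}

int main(void)
{
	/* Write pointer wrapped past the end: 2 entries are pending. */
	printf("%u\n", csb_pending(5, 1));
	return 0;
}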
......
......@@ -1079,7 +1079,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
*/
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);
intel_prepare_ddi(dev);
return 0;
}
......@@ -1338,8 +1337,8 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
return 0;
DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
wait_for_on ? "on" : "off",
I915_READ(VLV_GTLC_PW_STATUS));
onoff(wait_for_on),
I915_READ(VLV_GTLC_PW_STATUS));
/*
* RC6 transitioning can be delayed up to 2 msec (see
......@@ -1348,7 +1347,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
err = wait_for(COND, 3);
if (err)
DRM_ERROR("timeout waiting for GT wells to go %s\n",
wait_for_on ? "on" : "off");
onoff(wait_for_on));
return err;
#undef COND
......@@ -1359,7 +1358,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
return;
DRM_ERROR("GT register access while GT waking disabled\n");
DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
......@@ -1503,6 +1502,10 @@ static int intel_runtime_suspend(struct device *device)
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
dev_priv->pm.suspended = true;
/*
......@@ -1551,6 +1554,8 @@ static int intel_runtime_resume(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
intel_guc_resume(dev);
......
......@@ -34,6 +34,7 @@
#include <uapi/drm/drm_fourcc.h>
#include <drm/drmP.h>
#include "i915_params.h"
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
......@@ -58,7 +59,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20151218"
#define DRIVER_DATE "20160124"
#undef WARN_ON
/* Many gcc seem not to see through this and fall over :( */
......@@ -69,11 +70,11 @@
BUILD_BUG_ON(__i915_warn_cond); \
WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif
#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
(long) (x), __func__);
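The reworked WARN_ON/WARN_ON_ONCE build the whole "WARN_ON(...)" message as a single string literal at compile time (via __stringify() and literal concatenation) and feed it through a plain "%s" format, instead of formatting the condition text at run time. A quick userspace illustration of the two expansions, using the same stringify trick as <linux/stringify.h>:

#include <stdio.h>

#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

int main(void)
{
	/* Old form: condition text interpolated when the warning fires. */
	printf("WARN_ON(%s)\n", __stringify(ret < 0));
	/* New form: one literal, concatenated by the compiler. */
	printf("%s\n", "WARN_ON(" __stringify(ret < 0) ")");
	return 0;
}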
......@@ -87,31 +88,25 @@
*/
#define I915_STATE_WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) { \
if (i915.verbose_state_checks) \
WARN(1, format); \
else \
if (unlikely(__ret_warn_on)) \
if (!WARN(i915.verbose_state_checks, format)) \
DRM_ERROR(format); \
} \
unlikely(__ret_warn_on); \
})
#define I915_STATE_WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) { \
if (i915.verbose_state_checks) \
WARN(1, "WARN_ON(" #condition ")\n"); \
else \
DRM_ERROR("WARN_ON(" #condition ")\n"); \
} \
unlikely(__ret_warn_on); \
})
#define I915_STATE_WARN_ON(x) \
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
static inline const char *yesno(bool v)
{
return v ? "yes" : "no";
}
static inline const char *onoff(bool v)
{
return v ? "on" : "off";
}
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
......@@ -339,7 +334,7 @@ struct drm_i915_file_private {
unsigned boosts;
} rps;
struct intel_engine_cs *bsd_ring;
unsigned int bsd_ring;
};
enum intel_dpll_id {
......@@ -633,6 +628,7 @@ struct drm_i915_display_funcs {
struct dpll *best_clock);
int (*compute_pipe_wm)(struct intel_crtc *crtc,
struct drm_atomic_state *state);
void (*program_watermarks)(struct intel_crtc_state *cstate);
void (*update_wm)(struct drm_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
......@@ -657,9 +653,6 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags);
void (*update_primary_plane)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
......@@ -726,6 +719,8 @@ struct intel_uncore {
i915_reg_t reg_post;
u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT];
int unclaimed_mmio_check;
};
/* Iterate over initialised fw domains */
......@@ -889,6 +884,9 @@ struct intel_context {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
int pin_count;
struct i915_vma *lrc_vma;
u64 lrc_desc;
uint32_t *lrc_reg_state;
} engine[I915_NUM_RINGS];
struct list_head link;
......@@ -1301,7 +1299,7 @@ struct i915_gem_mm {
bool busy;
/* the indicator for dispatch video commands on two BSD rings */
int bsd_ring_dispatch_index;
unsigned int bsd_ring_dispatch_index;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
......@@ -1487,7 +1485,7 @@ struct intel_vbt_data {
u8 seq_version;
u32 size;
u8 *data;
u8 *sequence[MIPI_SEQ_MAX];
const u8 *sequence[MIPI_SEQ_MAX];
} dsi;
int crt_ddc_pin;
......@@ -1784,7 +1782,7 @@ struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_boot_cdclk;
unsigned int cdclk_freq, max_cdclk_freq;
unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
......@@ -1829,8 +1827,13 @@ struct drm_i915_private {
struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif
/* dpll and cdclk state is protected by connection_mutex */
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
struct i915_workarounds workarounds;
......@@ -1945,6 +1948,8 @@ struct drm_i915_private {
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
struct intel_context *kernel_context;
bool edp_low_vswing;
/* perform PHY state sanity checks? */
......@@ -2265,9 +2270,9 @@ struct drm_i915_gem_request {
};
int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out);
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx);
void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
......@@ -2576,6 +2581,11 @@ struct drm_i915_cmd_table {
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
* even when in MSI mode. This results in spurious interrupt warnings if the
......@@ -2665,44 +2675,7 @@ extern int i915_max_ioctl;
extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);
/* i915_params.c */
struct i915_params {
int modeset;
int panel_ignore_lid;
int semaphores;
int lvds_channel_mode;
int panel_use_ssc;
int vbt_sdvo_panel_type;
int enable_rc6;
int enable_dc;
int enable_fbc;
int enable_ppgtt;
int enable_execlists;
int enable_psr;
unsigned int preliminary_hw_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
int enable_cmd_parser;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
bool load_detect_test;
bool reset;
bool disable_display;
bool disable_vtd_wa;
bool enable_guc_submission;
int guc_log_level;
int use_mmio_flip;
int mmio_debug;
bool verbose_state_checks;
bool nuclear_pageflip;
int edp_vswing;
};
extern struct i915_params i915 __read_mostly;
/* i915_dma.c */
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
......@@ -2745,7 +2718,8 @@ extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
......
......@@ -1251,7 +1251,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
s64 before, now;
s64 before = 0; /* Only to silence a compiler warning. */
int ret;
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
......@@ -1271,14 +1271,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
return -ETIME;
timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
/*
* Record current time in case interrupted by signal, or wedged.
*/
before = ktime_get_raw_ns();
}
if (INTEL_INFO(dev_priv)->gen >= 6)
gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
/* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(req);
before = ktime_get_raw_ns();
/* Optimistic spin for the next jiffie before touching IRQs */
ret = __i915_spin_request(req, state);
......@@ -1343,11 +1346,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
finish_wait(&ring->irq_queue, &wait);
out:
now = ktime_get_raw_ns();
trace_i915_gem_request_wait_end(req);
if (timeout) {
s64 tres = *timeout - (now - before);
s64 tres = *timeout - (ktime_get_raw_ns() - before);
*timeout = tres < 0 ? 0 : tres;
......@@ -2677,10 +2679,8 @@ void i915_gem_request_free(struct kref *req_ref)
i915_gem_request_remove_from_client(req);
if (ctx) {
if (i915.enable_execlists) {
if (ctx != req->ring->default_context)
intel_lr_context_unpin(req);
}
if (i915.enable_execlists && ctx != req->i915->kernel_context)
intel_lr_context_unpin(req);
i915_gem_context_unreference(ctx);
}
......@@ -2688,9 +2688,10 @@ void i915_gem_request_free(struct kref *req_ref)
kmem_cache_free(req->i915->requests, req);
}
int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
static inline int
__i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct drm_i915_gem_request *req;
......@@ -2753,6 +2754,31 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
return ret;
}
/**
* i915_gem_request_alloc - allocate a request structure
*
* @engine: engine that we wish to issue the request on.
* @ctx: context that the request will be associated with.
* This can be NULL if the request is not directly related to
* any specific user context, in which case this function will
* choose an appropriate context to use.
*
* Returns a pointer to the allocated request if successful,
* or an error code if not.
*/
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx)
{
struct drm_i915_gem_request *req;
int err;
if (ctx == NULL)
ctx = to_i915(engine->dev)->kernel_context;
err = __i915_gem_request_alloc(engine, ctx, &req);
return err ? ERR_PTR(err) : req;
}
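The new allocator returns either the request or an errno encoded in the pointer itself. For readers unfamiliar with the ERR_PTR()/IS_ERR()/PTR_ERR() idiom from <linux/err.h>, here it is reduced to a self-contained userspace demo (the constants mirror the kernel's; the allocator is made up):

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *alloc_thing(int fail)
{
	static int thing;

	return fail ? ERR_PTR(-12) /* -ENOMEM */ : (void *)&thing;
}

int main(void)
{
	void *p = alloc_thing(1);

	if (IS_ERR(p))
		printf("allocation failed: %ld\n", PTR_ERR(p));
	return 0;
}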
void i915_gem_request_cancel(struct drm_i915_gem_request *req)
{
intel_ring_reserved_space_cancel(req->ringbuf);
......@@ -3170,9 +3196,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
return 0;
if (*to_req == NULL) {
ret = i915_gem_request_alloc(to, to->default_context, to_req);
if (ret)
return ret;
struct drm_i915_gem_request *req;
req = i915_gem_request_alloc(to, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
*to_req = req;
}
trace_i915_gem_ring_sync_to(*to_req, from, from_req);
......@@ -3372,9 +3402,9 @@ int i915_gpu_idle(struct drm_device *dev)
if (!i915.enable_execlists) {
struct drm_i915_gem_request *req;
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = i915_switch_context(req);
if (ret) {
......@@ -4328,10 +4358,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (ret)
goto unref;
BUILD_BUG_ON(I915_NUM_RINGS > 16);
args->busy = obj->active << 16;
if (obj->last_write_req)
args->busy |= obj->last_write_req->ring->id;
args->busy = 0;
if (obj->active) {
int i;
for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_request *req;
req = obj->last_read_req[i];
if (req)
args->busy |= 1 << (16 + req->ring->exec_id);
}
if (obj->last_write_req)
args->busy |= obj->last_write_req->ring->exec_id;
}
unref:
drm_gem_object_unreference(&obj->base);
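The repacked busy word puts one bit per user-visible engine id in the high half (readers) and the last writer's exec_id in the low half, so the uABI no longer leaks internal ring numbers. A hedged userspace-side sketch of decoding it (bit layout taken from the code above; nothing here is a published helper):

#include <stdint.h>
#include <stdio.h>

static void decode_busy(uint32_t busy)
{
	unsigned int readers = busy >> 16;	/* one bit per exec_id */
	unsigned int writer = busy & 0xffff;	/* exec_id of last writer */
	int i;

	if (!busy) {
		printf("object is idle\n");
		return;
	}
	for (i = 0; i < 16; i++)
		if (readers & (1u << i))
			printf("busy for read on engine %d\n", i);
	printf("last written from engine %u\n", writer);
}

int main(void)
{
	decode_busy((1u << 16) | 0);	/* read on engine 0, written by 0 */
	return 0;
}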
......@@ -4832,7 +4872,7 @@ i915_gem_init_hw(struct drm_device *dev)
*/
init_unused_rings(dev);
BUG_ON(!dev_priv->ring[RCS].default_context);
BUG_ON(!dev_priv->kernel_context);
ret = i915_ppgtt_init_hw(dev);
if (ret) {
......@@ -4869,10 +4909,9 @@ i915_gem_init_hw(struct drm_device *dev)
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_request *req;
WARN_ON(!ring->default_context);
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret) {
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
......@@ -5112,6 +5151,8 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
file_priv->bsd_ring = -1;
ret = i915_gem_context_open(dev, file);
if (ret)
kfree(file_priv);
......
......@@ -347,22 +347,20 @@ void i915_gem_context_reset(struct drm_device *dev)
i915_gem_context_unreference(lctx);
ring->last_context = NULL;
}
/* Force the GPU state to be reinitialised on enabling */
if (ring->default_context)
ring->default_context->legacy_hw_ctx.initialized = false;
}
/* Force the GPU state to be reinitialised on enabling */
dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
}
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
int i;
/* Init should only be called once per module load. Eventually the
* restriction on the context_disabled check can be loosened. */
if (WARN_ON(dev_priv->ring[RCS].default_context))
if (WARN_ON(dev_priv->kernel_context))
return 0;
if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
......@@ -392,12 +390,7 @@ int i915_gem_context_init(struct drm_device *dev)
return PTR_ERR(ctx);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
/* NB: RCS will hold a ref for all rings */
ring->default_context = ctx;
}
dev_priv->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
......@@ -408,7 +401,7 @@ int i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *dctx = dev_priv->ring[RCS].default_context;
struct intel_context *dctx = dev_priv->kernel_context;
int i;
if (dctx->legacy_hw_ctx.rcs_state) {
......@@ -435,17 +428,17 @@ void i915_gem_context_fini(struct drm_device *dev)
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
for (i = I915_NUM_RINGS; --i >= 0;) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
if (ring->last_context)
if (ring->last_context) {
i915_gem_context_unreference(ring->last_context);
ring->default_context = NULL;
ring->last_context = NULL;
ring->last_context = NULL;
}
}
i915_gem_context_unreference(dctx);
dev_priv->kernel_context = NULL;
}
int i915_gem_context_enable(struct drm_i915_gem_request *req)
......
......@@ -193,13 +193,10 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
return eb->lut[handle];
} else {
struct hlist_head *head;
struct hlist_node *node;
struct i915_vma *vma;
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
struct i915_vma *vma;
vma = hlist_entry(node, struct i915_vma, exec_node);
hlist_for_each_entry(vma, head, exec_node) {
if (vma->exec_handle == handle)
return vma;
}
......@@ -1309,6 +1306,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
exec_start = params->batch_obj_vm_offset +
params->args_batch_start_offset;
if (exec_len == 0)
exec_len = params->batch_obj->base.size;
ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
params->dispatch_flags);
......@@ -1325,33 +1325,23 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
/**
* Find one BSD ring to dispatch the corresponding BSD command.
* The Ring ID is returned.
* The ring index is returned.
*/
static int gen8_dispatch_bsd_ring(struct drm_device *dev,
struct drm_file *file)
static unsigned int
gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
/* Check whether the file_priv is using one ring */
if (file_priv->bsd_ring)
return file_priv->bsd_ring->id;
else {
/* If no, use the ping-pong mechanism to select one ring */
int ring_id;
mutex_lock(&dev->struct_mutex);
if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
ring_id = VCS;
dev_priv->mm.bsd_ring_dispatch_index = 1;
} else {
ring_id = VCS2;
dev_priv->mm.bsd_ring_dispatch_index = 0;
}
file_priv->bsd_ring = &dev_priv->ring[ring_id];
mutex_unlock(&dev->struct_mutex);
return ring_id;
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_ring < 0) {
/* If not, use the ping-pong mechanism to select one. */
mutex_lock(&dev_priv->dev->struct_mutex);
file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
dev_priv->mm.bsd_ring_dispatch_index ^= 1;
mutex_unlock(&dev_priv->dev->struct_mutex);
}
return file_priv->bsd_ring;
}
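In isolation, the policy above is: the first BSD submission from a file takes whichever ring the shared index points at, the index then flips for the next client, and the choice sticks to the file. A standalone illustration (locking elided; not driver code):

#include <stdio.h>

static unsigned int bsd_dispatch_index;	/* mirrors mm.bsd_ring_dispatch_index */

static unsigned int select_bsd_ring(int *file_cache)
{
	/* First request from this file: ping-pong the shared index. */
	if (*file_cache < 0) {
		*file_cache = bsd_dispatch_index;
		bsd_dispatch_index ^= 1;
	}
	return *file_cache;
}

int main(void)
{
	int client_a = -1, client_b = -1;

	printf("client A -> VCS%u\n", select_bsd_ring(&client_a) + 1);
	printf("client B -> VCS%u\n", select_bsd_ring(&client_b) + 1);
	printf("client A again -> VCS%u\n", select_bsd_ring(&client_a) + 1);
	return 0;
}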
static struct drm_i915_gem_object *
......@@ -1374,6 +1364,63 @@ eb_get_batch(struct eb_vmas *eb)
return vma->obj;
}
#define I915_USER_RINGS (4)
static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
[I915_EXEC_DEFAULT] = RCS,
[I915_EXEC_RENDER] = RCS,
[I915_EXEC_BLT] = BCS,
[I915_EXEC_BSD] = VCS,
[I915_EXEC_VEBOX] = VECS
};
static int
eb_select_ring(struct drm_i915_private *dev_priv,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct intel_engine_cs **ring)
{
unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
if (user_ring_id > I915_USER_RINGS) {
DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
return -EINVAL;
}
if ((user_ring_id != I915_EXEC_BSD) &&
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
return -EINVAL;
}
if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
bsd_idx <= I915_EXEC_BSD_RING2) {
bsd_idx >>= I915_EXEC_BSD_SHIFT;
bsd_idx--;
} else {
DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
bsd_idx);
return -EINVAL;
}
*ring = &dev_priv->ring[_VCS(bsd_idx)];
} else {
*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
}
if (!intel_ring_initialized(*ring)) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return -EINVAL;
}
return 0;
}
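From the uAPI side, the ring selection that eb_select_ring() parses is driven entirely by the execbuffer2 flags word. A small sketch of how a client would form those flags (values copied from the uapi i915_drm.h of this era so the snippet stands alone):

#include <stdint.h>

#define I915_EXEC_RING_MASK	(7)
#define I915_EXEC_BSD		(2)
#define I915_EXEC_BSD_SHIFT	(13)
#define I915_EXEC_BSD_DEFAULT	(0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1	(1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2	(2 << I915_EXEC_BSD_SHIFT)

int main(void)
{
	/* Pin this batch to the second BSD ring... */
	uint64_t pinned = I915_EXEC_BSD | I915_EXEC_BSD_RING2;
	/* ...or let the kernel ping-pong between the two. */
	uint64_t balanced = I915_EXEC_BSD | I915_EXEC_BSD_DEFAULT;

	(void)pinned;	/* would go into drm_i915_gem_execbuffer2.flags */
	(void)balanced;
	return 0;
}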
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
......@@ -1381,6 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *req = NULL;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
......@@ -1411,51 +1459,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;
if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
return -EINVAL;
}
if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
ring = &dev_priv->ring[RCS];
else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
if (HAS_BSD2(dev)) {
int ring_id;
switch (args->flags & I915_EXEC_BSD_MASK) {
case I915_EXEC_BSD_DEFAULT:
ring_id = gen8_dispatch_bsd_ring(dev, file);
ring = &dev_priv->ring[ring_id];
break;
case I915_EXEC_BSD_RING1:
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BSD_RING2:
ring = &dev_priv->ring[VCS2];
break;
default:
DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
(int)(args->flags & I915_EXEC_BSD_MASK));
return -EINVAL;
}
} else
ring = &dev_priv->ring[VCS];
} else
ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
ret = eb_select_ring(dev_priv, file, args, &ring);
if (ret)
return ret;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
......@@ -1602,11 +1608,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
/* Allocate a request for this batch buffer nice and early. */
ret = i915_gem_request_alloc(ring, ctx, &params->request);
if (ret)
req = i915_gem_request_alloc(ring, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto err_batch_unpin;
}
ret = i915_gem_request_add_to_client(params->request, file);
ret = i915_gem_request_add_to_client(req, file);
if (ret)
goto err_batch_unpin;
......@@ -1622,6 +1630,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;
params->request = req;
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
......@@ -1645,8 +1654,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* must be freed again. If it was submitted then it is being tracked
* on the active request list and no clean up is required here.
*/
if (ret && params->request)
i915_gem_request_cancel(params->request);
if (ret && req)
i915_gem_request_cancel(req);
mutex_unlock(&dev->struct_mutex);
......
......@@ -96,9 +96,11 @@
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
const struct i915_ggtt_view i915_ggtt_view_normal;
const struct i915_ggtt_view i915_ggtt_view_normal = {
.type = I915_GGTT_VIEW_NORMAL,
};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
.type = I915_GGTT_VIEW_ROTATED
.type = I915_GGTT_VIEW_ROTATED,
};
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
......@@ -3329,7 +3331,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
}
static struct scatterlist *
rotate_pages(dma_addr_t *in, unsigned int offset,
rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int width, unsigned int height,
struct sg_table *st, struct scatterlist *sg)
{
......
......@@ -44,7 +44,6 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
......
......@@ -47,6 +47,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
static int num_vma_bound(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
int count = 0;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (drm_mm_node_allocated(&vma->node))
count++;
if (vma->pin_count)
count++;
}
return count;
}
static bool swap_available(void)
{
return get_nr_swap_pages() > 0;
}
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
/* Only report true if by unbinding the object and putting its pages
* we can actually make forward progress towards freeing physical
* pages.
*
* If the pages are pinned for any other reason than being bound
* to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves.
*/
if (obj->pages_pin_count != num_vma_bound(obj))
return false;
/* We can only return physical pages to the system if we can either
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
return swap_available() || obj->madv == I915_MADV_DONTNEED;
}
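As a worked example of the bookkeeping above: an object whose only page pins come from its GPU bindings has pages_pin_count equal to num_vma_bound(), so unbinding can actually free pages; any extra pin means the shrinker should skip it. A hedged standalone rendering (field names mirror the driver; the struct is made up):

#include <stdio.h>

struct obj {
	int pages_pin_count;
	int vma_bound;		/* stands in for num_vma_bound(obj) */
	int madv_dontneed;	/* obj->madv == I915_MADV_DONTNEED */
};

static int can_release_pages(const struct obj *o, int swap_available)
{
	if (o->pages_pin_count != o->vma_bound)
		return 0;	/* pinned for some other reason */
	return swap_available || o->madv_dontneed;
}

int main(void)
{
	struct obj bound_only = { .pages_pin_count = 1, .vma_bound = 1 };
	struct obj extra_pin = { .pages_pin_count = 2, .vma_bound = 1 };

	printf("%d %d\n",
	       can_release_pages(&bound_only, 1),	/* 1: releasable */
	       can_release_pages(&extra_pin, 1));	/* 0: skip */
	return 0;
}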
/**
* i915_gem_shrink - Shrink buffer object caches
* @dev_priv: i915 device
......@@ -129,6 +169,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
continue;
if (!can_release_pages(obj))
continue;
drm_gem_object_reference(&obj->base);
/* For the unbound phase, this should be a no-op! */
......@@ -188,21 +231,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
return true;
}
static int num_vma_bound(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
int count = 0;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (drm_mm_node_allocated(&vma->node))
count++;
if (vma->pin_count)
count++;
}
return count;
}
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
......@@ -222,7 +250,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
if (!obj->active && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
}
......
......@@ -569,6 +569,9 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
if (obj->pages == NULL)
goto cleanup;
obj->get_page.sg = obj->pages->sgl;
obj->get_page.last = 0;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
......
......@@ -1050,7 +1050,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
if (request)
rbuf = request->ctx->engine[ring->id].ringbuf;
else
rbuf = ring->default_context->engine[ring->id].ringbuf;
rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
} else
rbuf = ring->buffer;
......
......@@ -40,6 +40,7 @@
#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
#define SOFT_SCRATCH_COUNT 16
#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
#define UOS_RSA_SCRATCH_MAX_COUNT 64
......
......@@ -158,10 +158,8 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
if (!intel_enable_rc6(dev_priv->dev) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
(IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
(IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
if (!intel_enable_rc6(dev) ||
NEEDS_WaRsDisableCoarsePowerGating(dev))
data[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
......@@ -246,6 +244,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
}
/* Finally, update the cached copy of the GuC's WQ head */
gc->wq_head = desc->head;
kunmap_atomic(base);
return ret;
}
......@@ -471,28 +472,30 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
sizeof(desc) * client->ctx_index);
}
/* Get valid workqueue item and return it back to offset */
static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
int i915_guc_wq_check_space(struct i915_guc_client *gc)
{
struct guc_process_desc *desc;
void *base;
u32 size = sizeof(struct guc_wq_item);
int ret = -ETIMEDOUT, timeout_counter = 200;
if (!gc)
return 0;
/* Quickly return if wq space is available since the last time we
* cached the head position. */
if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
return 0;
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
while (timeout_counter-- > 0) {
if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
*offset = gc->wq_tail;
/* advance the tail for next workqueue item */
gc->wq_tail += size;
gc->wq_tail &= gc->wq_size - 1;
gc->wq_head = desc->head;
/* this will break the loop */
timeout_counter = 0;
if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
ret = 0;
break;
}
if (timeout_counter)
......@@ -510,12 +513,16 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
enum intel_ring_id ring_id = rq->ring->id;
struct guc_wq_item *wqi;
void *base;
u32 tail, wq_len, wq_off = 0;
int ret;
u32 tail, wq_len, wq_off, space;
space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
if (WARN_ON(space < sizeof(struct guc_wq_item)))
return -ENOSPC; /* shouldn't happen */
ret = guc_get_workqueue_space(gc, &wq_off);
if (ret)
return ret;
/* postincrement WQ tail for next time */
wq_off = gc->wq_tail;
gc->wq_tail += sizeof(struct guc_wq_item);
gc->wq_tail &= gc->wq_size - 1;
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
* should not have the case where structure wqi is across page, neither
......@@ -832,6 +839,96 @@ static void guc_create_log(struct intel_guc *guc)
guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}
static void init_guc_policies(struct guc_policies *policies)
{
struct guc_policy *policy;
u32 p, i;
policies->dpc_promote_time = 500000;
policies->max_num_work_items = POLICY_MAX_NUM_WI;
for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
for (i = 0; i < I915_NUM_RINGS; i++) {
policy = &policies->policy[p][i];
policy->execution_quantum = 1000000;
policy->preemption_time = 500000;
policy->fault_time = 250000;
policy->policy_flags = 0;
}
}
policies->is_valid = 1;
}
static void guc_create_ads(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct drm_i915_gem_object *obj;
struct guc_ads *ads;
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *ring;
struct page *page;
u32 size, i;
/* The ads obj includes the struct itself and buffers passed to GuC */
size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
sizeof(struct guc_mmio_reg_state) +
GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
obj = guc->ads_obj;
if (!obj) {
obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
if (!obj)
return;
guc->ads_obj = obj;
}
page = i915_gem_object_get_page(obj, 0);
ads = kmap(page);
/*
* The GuC requires a "Golden Context" when it reinitialises
* engines after a reset. Here we use the Render ring default
* context, which must already exist and be pinned in the GGTT,
* so its address won't change after we've told the GuC where
* to find it.
*/
ring = &dev_priv->ring[RCS];
ads->golden_context_lrca = ring->status_page.gfx_addr;
for_each_ring(ring, dev_priv, i)
ads->eng_state_size[i] = intel_lr_context_size(ring);
/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
init_guc_policies(policies);
ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
sizeof(struct guc_ads);
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
for (i = 0; i < I915_NUM_RINGS; i++) {
reg_state->mmio_white_list[i].mmio_start =
dev_priv->ring[i].mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
reg_state->mmio_white_list[i].count = 0;
}
ads->reg_state_addr = ads->scheduler_policies +
sizeof(struct guc_policies);
ads->reg_state_buffer = ads->reg_state_addr +
sizeof(struct guc_mmio_reg_state);
kunmap(page);
}
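The resulting ADS object is one GEM buffer with the sub-blocks packed back to back, in the order the pointers above are derived. A standalone offset calculation sketching the layout (struct sizes here are placeholders, not the real guc_fwif.h values):

#include <stdio.h>

#define PAGE_SIZE		4096
#define GUC_S3_SAVE_SPACE_PAGES	10	/* assumed, per the size sum above */

/* Placeholder sizes, for illustration only. */
#define ADS_SIZE	64
#define POLICIES_SIZE	1024
#define REG_STATE_SIZE	2048

int main(void)
{
	unsigned long policies = ADS_SIZE;
	unsigned long reg_state = policies + POLICIES_SIZE;
	unsigned long save_buf = reg_state + REG_STATE_SIZE;
	unsigned long total = save_buf + GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;

	printf("policies @%lu, reg_state @%lu, save buffer @%lu, total %lu\n",
	       policies, reg_state, save_buf, total);
	return 0;
}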
/*
* Set up the memory resources to be shared with the GuC. At this point,
* we require just one object that can be mapped through the GGTT.
......@@ -858,6 +955,8 @@ int i915_guc_submission_init(struct drm_device *dev)
guc_create_log(guc);
guc_create_ads(guc);
return 0;
}
......@@ -865,7 +964,7 @@ int i915_guc_submission_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
struct intel_context *ctx = dev_priv->ring[RCS].default_context;
struct intel_context *ctx = dev_priv->kernel_context;
struct i915_guc_client *client;
/* client for execbuf submission */
......@@ -896,6 +995,9 @@ void i915_guc_submission_fini(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
gem_release_guc_obj(dev_priv->guc.ads_obj);
guc->ads_obj = NULL;
gem_release_guc_obj(dev_priv->guc.log_obj);
guc->log_obj = NULL;
......@@ -919,7 +1021,7 @@ int intel_guc_suspend(struct drm_device *dev)
if (!i915.enable_guc_submission)
return 0;
ctx = dev_priv->ring[RCS].default_context;
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
......@@ -945,7 +1047,7 @@ int intel_guc_resume(struct drm_device *dev)
if (!i915.enable_guc_submission)
return 0;
ctx = dev_priv->ring[RCS].default_context;
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
......
......@@ -22,6 +22,7 @@
* IN THE SOFTWARE.
*/
#include "i915_params.h"
#include "i915_drv.h"
struct i915_params i915 __read_mostly = {
......
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef _I915_PARAMS_H_
#define _I915_PARAMS_H_
#include <linux/cache.h> /* for __read_mostly */
struct i915_params {
int modeset;
int panel_ignore_lid;
int semaphores;
int lvds_channel_mode;
int panel_use_ssc;
int vbt_sdvo_panel_type;
int enable_rc6;
int enable_dc;
int enable_fbc;
int enable_ppgtt;
int enable_execlists;
int enable_psr;
unsigned int preliminary_hw_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
int enable_cmd_parser;
int guc_log_level;
int use_mmio_flip;
int mmio_debug;
int edp_vswing;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
bool load_detect_test;
bool reset;
bool disable_display;
bool disable_vtd_wa;
bool enable_guc_submission;
bool verbose_state_checks;
bool nuclear_pageflip;
};
extern struct i915_params i915 __read_mostly;
#endif
......@@ -1711,6 +1711,11 @@ enum skl_disp_power_wells {
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1<<31)
#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
#define CLAIM_ER_CLR (1 << 31)
#define CLAIM_ER_OVERFLOW (1 << 16)
#define CLAIM_ER_CTR_MASK 0xffff
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
#define DERRMR_PIPEA_SCANLINE (1<<0)
......
......@@ -164,7 +164,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct device *dev = kobj_to_dev(kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
......@@ -200,7 +200,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct device *dev = kobj_to_dev(kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
......@@ -521,7 +521,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
loff_t off, size_t count)
{
struct device *kdev = container_of(kobj, struct device, kobj);
struct device *kdev = kobj_to_dev(kobj);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct i915_error_state_file_priv error_priv;
......@@ -556,7 +556,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *kdev = container_of(kobj, struct device, kobj);
struct device *kdev = kobj_to_dev(kobj);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
int ret;
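kobj_to_dev() is the generic helper from <linux/device.h> and expands to exactly the container_of() expression it replaces here. The underlying pointer arithmetic, reduced to a userspace demo:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kobject { int refcount; };
struct device { int id; struct kobject kobj; };

static struct device *kobj_to_dev(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}

int main(void)
{
	struct device dev = { .id = 7 };

	/* Recover the containing device from its embedded kobject. */
	printf("device id %d\n", kobj_to_dev(&dev.kobj)->id);
	return 0;
}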
......
......@@ -308,5 +308,5 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
drm_atomic_state_default_clear(&state->base);
state->dpll_set = false;
state->dpll_set = state->modeset = false;
}
......@@ -152,9 +152,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
intel_state->clip.x2 =
crtc_state->base.active ? crtc_state->pipe_src_w : 0;
crtc_state->base.enable ? crtc_state->pipe_src_w : 0;
intel_state->clip.y2 =
crtc_state->base.active ? crtc_state->pipe_src_h : 0;
crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
if (state->fb && intel_rotation_90_or_270(state->rotation)) {
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
......@@ -194,8 +194,16 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane->state);
struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
struct drm_crtc_state *crtc_state =
drm_atomic_get_existing_crtc_state(old_state->state, crtc);
intel_plane->commit_plane(plane, intel_state);
if (intel_state->visible)
intel_plane->update_plane(plane,
to_intel_crtc_state(crtc_state),
intel_state);
else
intel_plane->disable_plane(plane, crtc);
}
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
......
......@@ -25,25 +25,43 @@
*
*/
#ifndef _I830_BIOS_H_
#define _I830_BIOS_H_
#ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_
/**
* struct vbt_header - VBT Header structure
* @signature: VBT signature, always starts with "$VBT"
* @version: Version of this structure
* @header_size: Size of this structure
* @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks)
* @vbt_checksum: Checksum
* @reserved0: Reserved
* @bdb_offset: Offset of &struct bdb_header from beginning of VBT
* @aim_offset: Offsets of add-in data blocks from beginning of VBT
*/
struct vbt_header {
u8 signature[20]; /**< Always starts with 'VBT$' */
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 vbt_size; /**< in bytes */
u8 signature[20];
u16 version;
u16 header_size;
u16 vbt_size;
u8 vbt_checksum;
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
u32 bdb_offset;
u32 aim_offset[4];
} __packed;
/**
* struct bdb_header - BDB Header structure
* @signature: BDB signature "BIOS_DATA_BLOCK"
* @version: Version of the data block definitions
* @header_size: Size of this structure
* @bdb_size: Size of BDB (BDB Header and data blocks)
*/
struct bdb_header {
u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 bdb_size; /**< in bytes */
u8 signature[16];
u16 version;
u16 header_size;
u16 bdb_size;
} __packed;
/* strictly speaking, this is a "skip" block, but it has interesting info */
......@@ -936,21 +954,29 @@ struct bdb_mipi_sequence {
/* MIPI Sequence Block definitions */
enum mipi_seq {
MIPI_SEQ_UNDEFINED = 0,
MIPI_SEQ_END = 0,
MIPI_SEQ_ASSERT_RESET,
MIPI_SEQ_INIT_OTP,
MIPI_SEQ_DISPLAY_ON,
MIPI_SEQ_DISPLAY_OFF,
MIPI_SEQ_DEASSERT_RESET,
MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
MIPI_SEQ_POWER_ON, /* sequence block v3+ */
MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
MIPI_SEQ_MAX
};
enum mipi_seq_element {
MIPI_SEQ_ELEM_UNDEFINED = 0,
MIPI_SEQ_ELEM_END = 0,
MIPI_SEQ_ELEM_SEND_PKT,
MIPI_SEQ_ELEM_DELAY,
MIPI_SEQ_ELEM_GPIO,
MIPI_SEQ_ELEM_STATUS,
MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
MIPI_SEQ_ELEM_MAX
};
......@@ -965,4 +991,4 @@ enum mipi_gpio_pin_index {
MIPI_GPIO_MAX
};
#endif /* _I830_BIOS_H_ */
#endif /* _INTEL_BIOS_H_ */
......@@ -44,6 +44,8 @@
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
......@@ -278,10 +280,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
csr->version < SKL_CSR_VERSION_REQUIRED) {
DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
" please upgrade to v%u.%u or later"
" [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
" [" FIRMWARE_URL "].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
......@@ -399,7 +402,10 @@ static void csr_load_work_fn(struct work_struct *work)
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
} else {
DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
dev_notice(dev_priv->dev->dev,
"Failed to load DMC firmware"
" [" FIRMWARE_URL "],"
" disabling runtime power management.\n");
}
release_firmware(fw);
......@@ -421,7 +427,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
if (IS_SKYLAKE(dev_priv))
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
......
......@@ -157,14 +157,9 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
u8 source_max, sink_max;
source_max = 4;
if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
(intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
source_max = 2;
source_max = intel_dig_port->max_lanes;
sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
return min(source_max, sink_max);
......@@ -340,8 +335,12 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
release_cl_override = IS_CHERRYVIEW(dev) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll);
if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
DRM_ERROR("Failed to force on pll for pipe %c!\n",
pipe_name(pipe));
return;
}
}
/*
......@@ -2243,11 +2242,6 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
_intel_edp_backlight_off(intel_dp);
}
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
}
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
......@@ -2257,7 +2251,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
I915_STATE_WARN(cur_state != state,
"DP port %c state assertion failure (expected %s, current %s)\n",
port_name(dig_port->port),
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
......@@ -2267,7 +2261,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
I915_STATE_WARN(cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
......@@ -5839,6 +5833,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
int type, ret;
if (WARN(intel_dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
return false;
intel_dp->pps_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
......@@ -6037,6 +6036,7 @@ intel_dp_init(struct drm_device *dev,
intel_dig_port->port = port;
dev_priv->dig_port_map[port] = intel_encoder;
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
if (IS_CHERRYVIEW(dev)) {
......
......@@ -184,7 +184,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
intel_mst->port = found->port;
if (intel_dp->active_mst_links == 0) {
intel_ddi_clk_select(encoder, intel_crtc->config);
intel_prepare_ddi_buffer(&intel_dig_port->base);
intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
intel_dp_set_link_params(intel_dp, intel_crtc->config);
......
......@@ -246,7 +246,18 @@ struct intel_atomic_state {
struct drm_atomic_state base;
unsigned int cdclk;
bool dpll_set;
/*
* Calculated device cdclk, can be different from cdclk
* only when all crtc's are DPMS off.
*/
unsigned int dev_cdclk;
bool dpll_set, modeset;
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
struct intel_wm_config wm_config;
};
......@@ -647,23 +658,17 @@ struct intel_plane {
/*
* NOTE: Do not place new plane state fields here (e.g., when adding
* new plane properties). New runtime state should now be placed in
* the intel_plane_state structure and accessed via drm_plane->state.
* the intel_plane_state structure and accessed via plane_state.
*/
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
int (*check_plane)(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
};
struct intel_watermark_params {
......@@ -817,6 +822,7 @@ struct intel_digital_port {
struct intel_hdmi hdmi;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
/* for communication with audio component; protected by av_mutex */
const struct drm_connector *audio_connector;
};
......@@ -996,7 +1002,7 @@ void intel_crt_init(struct drm_device *dev);
/* intel_ddi.c */
void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
void intel_prepare_ddi(struct drm_device *dev);
void intel_prepare_ddi_buffer(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
......@@ -1041,8 +1047,8 @@ unsigned int intel_fb_align_height(struct drm_device *dev,
uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
enum fb_op_origin origin);
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
uint32_t pixel_format);
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, uint32_t pixel_format);
/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
......@@ -1126,9 +1132,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state);
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
uint64_t fb_format_modifier, unsigned int plane);
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, unsigned int cpp);
static inline bool
intel_rotation_90_or_270(unsigned int rotation)
......@@ -1149,8 +1154,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state);
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
/* modesetting asserts */
......@@ -1167,11 +1172,11 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
unsigned long intel_compute_tile_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
uint64_t fb_modifier,
unsigned int cpp,
unsigned int pitch);
void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
......
......@@ -702,7 +702,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
u32 pclk = 0;
u32 pclk;
DRM_DEBUG_KMS("\n");
pipe_config->has_dsi_encoder = true;
......@@ -713,12 +713,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
*/
pipe_config->dpll_hw_state.dpll_md = 0;
if (IS_BROXTON(encoder->base.dev))
pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
else if (IS_VALLEYVIEW(encoder->base.dev) ||
IS_CHERRYVIEW(encoder->base.dev))
pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
if (!pclk)
return;
......
......@@ -126,8 +126,7 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);
......
......@@ -119,7 +119,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct drm_framebuffer *fb = NULL;
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
......@@ -171,8 +171,6 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
out:
mutex_unlock(&dev->struct_mutex);
if (!IS_ERR_OR_NULL(fb))
drm_framebuffer_unreference(fb);
return ret;
}
......
......@@ -43,6 +43,7 @@ struct i915_guc_client {
uint32_t wq_offset;
uint32_t wq_size;
uint32_t wq_tail;
uint32_t wq_head;
/* GuC submission statistics & status */
uint64_t submissions[I915_NUM_RINGS];
......@@ -88,6 +89,8 @@ struct intel_guc {
uint32_t log_flags;
struct drm_i915_gem_object *log_obj;
struct drm_i915_gem_object *ads_obj;
struct drm_i915_gem_object *ctx_pool_obj;
struct ida ctx_ids;
......@@ -122,5 +125,6 @@ int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq);
void i915_guc_submission_disable(struct drm_device *dev);
void i915_guc_submission_fini(struct drm_device *dev);
int i915_guc_wq_check_space(struct i915_guc_client *client);
#endif
......@@ -2033,6 +2033,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
if (WARN(intel_dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
return;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
......@@ -2218,6 +2223,7 @@ void intel_hdmi_init(struct drm_device *dev,
dev_priv->dig_port_map[port] = intel_encoder;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = 4;
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
......@@ -48,6 +48,12 @@ static sector_t map_swap_entry(swp_entry_t, struct block_device**);
DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/*
* Some modules use swappable objects and may try to swap them out under
* memory pressure (via the shrinker). Before doing so, they may wish to
* check to see if any swap space is available.
*/
EXPORT_SYMBOL_GPL(nr_swap_pages);
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority;
......