Commit 605b28c8 authored by Dave Airlie

Merge tag 'drm-intel-next-2016-04-11' of git://anongit.freedesktop.org/drm-intel into drm-next

- make modeset hw state checker atomic aware (Maarten)
- close races in gpu stuck detection/seqno reading (Chris)
- tons&tons of small improvements from Chris Wilson all over the gem code
- more dsi/bxt work from Ramalingam&Jani
- macro polish from Joonas
- guc fw loading fixes (Arun&Dave)
- vmap notifier (acked by Andrew) + i915 support by Chris Wilson
- create bottom half for execlist irq processing (Chris Wilson)
- vlv/chv pll cleanup (Ville)
- rework DP detection, especially sink detection (Shubhangi Shrivastava)
- make color manager support fully atomic (Maarten)
- avoid livelock on chv in execlist irq handler (Chris)

* tag 'drm-intel-next-2016-04-11' of git://anongit.freedesktop.org/drm-intel: (82 commits)
  drm/i915: Update DRIVER_DATE to 20160411
  drm/i915: Avoid allocating a vmap arena for a single page
  drm,i915: Introduce drm_malloc_gfp()
  drm/i915/shrinker: Restrict vmap purge to objects with vmaps
  drm/i915: Refactor duplicate object vmap functions
  drm/i915: Consolidate common error handling in intel_pin_and_map_ringbuffer_obj
  drm/i915/dmabuf: Tighten struct_mutex for unmap_dma_buf
  drm/i915: implement WaClearTdlStateAckDirtyBits
  drm/i915/bxt: Reversed polarity of PORT_PLL_REF_SEL bit
  drm/i915: Rename hw state checker to hw state verifier.
  drm/i915: Move modeset state verifier calls.
  drm/i915: Make modeset state verifier take crtc as argument.
  drm/i915: Replace manual barrier() with READ_ONCE() in HWS accessor
  drm/i915: Use simplest form for flushing the single cacheline in the HWS
  drm/i915: Harden detection of missed interrupts
  drm/i915: Separate out the seqno-barrier from engine->get_seqno
  drm/i915: Remove forcewake dance from seqno/irq barrier on legacy gen6+
  drm/i915: Fixup the free space logic in ring_prepare
  drm/i915: Simplify check for idleness in hangcheck
  drm/i915: Apply a mb between emitting the request and hangcheck
  ...
parents 49047962 ba3150ac
......@@ -134,6 +134,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
int pin_count = 0;
enum intel_engine_id id;
lockdep_assert_held(&obj->base.dev->struct_mutex);
seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
&obj->base,
obj->active ? "*" : " ",
......@@ -202,8 +204,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->ggtt.base;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
u64 total_obj_size, total_gtt_size;
int count, ret;
......@@ -216,11 +218,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
switch (list) {
case ACTIVE_LIST:
seq_puts(m, "Active:\n");
head = &vm->active_list;
head = &ggtt->base.active_list;
break;
case INACTIVE_LIST:
seq_puts(m, "Inactive:\n");
head = &vm->inactive_list;
head = &ggtt->base.inactive_list;
break;
default:
mutex_unlock(&dev->struct_mutex);
......@@ -429,11 +431,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 count, mappable_count, purgeable_count;
u64 size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm = &dev_priv->ggtt.base;
struct drm_file *file;
struct i915_vma *vma;
int ret;
......@@ -452,12 +454,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_vmas(&vm->active_list, vm_link);
count_vmas(&ggtt->base.active_list, vm_link);
seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_vmas(&vm->inactive_list, vm_link);
count_vmas(&ggtt->base.inactive_list, vm_link);
seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
......@@ -492,8 +494,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, size);
seq_printf(m, "%llu [%llu] gtt total\n",
dev_priv->ggtt.base.total,
(u64)dev_priv->ggtt.mappable_end - dev_priv->ggtt.base.start);
ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
......@@ -597,7 +598,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
engine->name,
i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
engine->get_seqno(engine, true),
engine->get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req, true));
} else
seq_printf(m, "Flip not associated with any ring\n");
......@@ -727,10 +728,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
static void i915_ring_seqno_info(struct seq_file *m,
struct intel_engine_cs *engine)
{
if (engine->get_seqno) {
seq_printf(m, "Current sequence (%s): %x\n",
engine->name, engine->get_seqno(engine, false));
}
seq_printf(m, "Current sequence (%s): %x\n",
engine->name, engine->get_seqno(engine));
seq_printf(m, "Current user interrupts (%s): %x\n",
engine->name, READ_ONCE(engine->user_interrupts));
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
......@@ -1345,8 +1346,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
for_each_engine_id(engine, dev_priv, id) {
seqno[id] = engine->get_seqno(engine, false);
acthd[id] = intel_ring_get_active_head(engine);
seqno[id] = engine->get_seqno(engine);
}
i915_get_extra_instdone(dev, instdone);
......@@ -1362,8 +1363,13 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
for_each_engine_id(engine, dev_priv, id) {
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x]\n",
engine->hangcheck.seqno, seqno[id]);
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno,
seqno[id],
engine->last_submitted_seqno);
seq_printf(m, "\tuser interrupts = %x [current %x]\n",
engine->hangcheck.user_interrupts,
READ_ONCE(engine->user_interrupts));
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
......@@ -1895,6 +1901,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (to_i915(dev)->fbdev) {
......@@ -1929,6 +1940,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
mutex_unlock(&dev->struct_mutex);
return 0;
}
......@@ -2093,7 +2105,6 @@ static int i915_execlists(struct seq_file *m, void *data)
for_each_engine(engine, dev_priv) {
struct drm_i915_gem_request *head_req = NULL;
int count = 0;
unsigned long flags;
seq_printf(m, "%s\n", engine->name);
......@@ -2120,13 +2131,13 @@ static int i915_execlists(struct seq_file *m, void *data)
i, status, ctx_id);
}
spin_lock_irqsave(&engine->execlist_lock, flags);
spin_lock_bh(&engine->execlist_lock);
list_for_each(cursor, &engine->execlist_queue)
count++;
head_req = list_first_entry_or_null(&engine->execlist_queue,
struct drm_i915_gem_request,
execlist_link);
spin_unlock_irqrestore(&engine->execlist_lock, flags);
spin_unlock_bh(&engine->execlist_lock);
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
......@@ -2409,7 +2420,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
u32 tmp, i;
if (!HAS_GUC_UCODE(dev_priv->dev))
if (!HAS_GUC_UCODE(dev_priv))
return 0;
seq_printf(m, "GuC firmware status:\n");
......@@ -2483,7 +2494,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
struct intel_engine_cs *engine;
u64 total = 0;
if (!HAS_GUC_SCHED(dev_priv->dev))
if (!HAS_GUC_SCHED(dev_priv))
return 0;
if (mutex_lock_interruptible(&dev->struct_mutex))
......@@ -2687,10 +2698,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_RUNTIME_PM(dev)) {
seq_puts(m, "not supported\n");
return 0;
}
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "IRQs disabled: %s\n",
......@@ -2701,6 +2710,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
#else
seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
seq_printf(m, "PCI device power state: %s [%d]\n",
pci_power_name(dev_priv->dev->pdev->current_state),
dev_priv->dev->pdev->current_state);
return 0;
}
......
......@@ -493,9 +493,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
* Some ports require correctly set-up hpd registers for detection to
* work properly (leading to ghost connected connector status), e.g. VGA
* on gm45. Hence we can only set up the initial fbdev config after hpd
* irqs are fully enabled. We protect the fbdev initial config scanning
* against hotplug events by waiting in intel_fbdev_output_poll_changed
* until the asynchronous thread has finished.
* irqs are fully enabled. Now we should scan for the initial config
* only once hotplug handling is enabled, but due to screwed-up locking
 * around kms/fbdev init we can't protect the fbdev initial config
 * scanning against hotplug events. Hence do this first and ignore the
 * tiny window where we will lose hotplug notifications.
*/
intel_fbdev_initial_config_async(dev);
......@@ -527,6 +529,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
struct pci_dev *pdev = dev_priv->dev->pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool primary;
int ret;
......@@ -534,8 +537,8 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return -ENOMEM;
ap->ranges[0].base = dev_priv->ggtt.mappable_base;
ap->ranges[0].size = dev_priv->ggtt.mappable_end;
ap->ranges[0].base = ggtt->mappable_base;
ap->ranges[0].size = ggtt->mappable_end;
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
......@@ -1170,6 +1173,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t aperture_size;
int ret;
......@@ -1178,7 +1182,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_device_info_runtime_init(dev);
ret = i915_gem_gtt_init(dev);
ret = i915_ggtt_init_hw(dev);
if (ret)
return ret;
......@@ -1187,13 +1191,13 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
ret = i915_kick_out_firmware_fb(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
goto out_gtt;
goto out_ggtt;
}
ret = i915_kick_out_vgacon(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting VGA console\n");
goto out_gtt;
goto out_ggtt;
}
pci_set_master(dev->pdev);
......@@ -1213,17 +1217,17 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
aperture_size = dev_priv->ggtt.mappable_end;
aperture_size = ggtt->mappable_end;
dev_priv->ggtt.mappable =
io_mapping_create_wc(dev_priv->ggtt.mappable_base,
ggtt->mappable =
io_mapping_create_wc(ggtt->mappable_base,
aperture_size);
if (dev_priv->ggtt.mappable == NULL) {
if (!ggtt->mappable) {
ret = -EIO;
goto out_gtt;
goto out_ggtt;
}
dev_priv->ggtt.mtrr = arch_phys_wc_add(dev_priv->ggtt.mappable_base,
ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
aperture_size);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
......@@ -1253,8 +1257,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
return 0;
out_gtt:
i915_global_gtt_cleanup(dev);
out_ggtt:
i915_ggtt_cleanup_hw(dev);
return ret;
}
......@@ -1266,14 +1270,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
arch_phys_wc_del(dev_priv->ggtt.mtrr);
io_mapping_free(dev_priv->ggtt.mappable);
i915_global_gtt_cleanup(dev);
arch_phys_wc_del(ggtt->mtrr);
io_mapping_free(ggtt->mappable);
i915_ggtt_cleanup_hw(dev);
}
/**
......
......@@ -360,14 +360,12 @@ static const struct intel_device_info intel_broxton_info = {
static const struct intel_device_info intel_kabylake_info = {
BDW_FEATURES,
.is_preliminary = 1,
.is_kabylake = 1,
.gen = 9,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
BDW_FEATURES,
.is_preliminary = 1,
.is_kabylake = 1,
.gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
......@@ -1402,7 +1400,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
if (err)
goto err2;
if (!IS_CHERRYVIEW(dev_priv->dev))
if (!IS_CHERRYVIEW(dev_priv))
vlv_save_gunit_s0ix_state(dev_priv);
err = vlv_force_gfx_clock(dev_priv, false);
......@@ -1434,7 +1432,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
*/
ret = vlv_force_gfx_clock(dev_priv, true);
if (!IS_CHERRYVIEW(dev_priv->dev))
if (!IS_CHERRYVIEW(dev_priv))
vlv_restore_gunit_s0ix_state(dev_priv);
err = vlv_allow_gt_wake(dev_priv, true);
......
......@@ -60,7 +60,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20160330"
#define DRIVER_DATE "20160411"
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
......@@ -495,6 +495,7 @@ struct drm_i915_error_state {
u32 cpu_ring_head;
u32 cpu_ring_tail;
u32 last_seqno;
u32 semaphore_seqno[I915_NUM_ENGINES - 1];
/* Register state */
......@@ -612,8 +613,8 @@ struct drm_i915_display_funcs {
/* display clock increase/decrease */
/* pll clock increase/decrease */
void (*load_csc_matrix)(struct drm_crtc *crtc);
void (*load_luts)(struct drm_crtc *crtc);
void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
void (*load_luts)(struct drm_crtc_state *crtc_state);
};
enum forcewake_domain_id {
......@@ -1118,6 +1119,7 @@ struct intel_gen6_power_mgmt {
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/freqency */
u8 rp0_freq; /* Non-overclocked max frequency. */
u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
u8 up_threshold; /* Current %busy required to uplock */
u8 down_threshold; /* Current %busy required to downclock */
......@@ -1257,6 +1259,7 @@ struct i915_gem_mm {
struct i915_hw_ppgtt *aliasing_ppgtt;
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
bool shrinker_no_lock_stealing;
......@@ -1837,6 +1840,13 @@ struct drm_i915_private {
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
const struct intel_dpll_mgr *dpll_mgr;
/*
* dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
* Must be global rather than per dpll, because on some platforms
* plls share registers.
*/
struct mutex dpll_lock;
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
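The dpll_lock comment above states the locking rule; as a rough sketch (an assumption about the callers in intel_dpll_mgr.c, which are outside this diff), the serialized helpers would bracket register access like this:

/* Hypothetical sketch only: the real intel_enable_shared_dpll() is
 * not part of this diff. PLL registers may be shared between PLLs,
 * hence the single global mutex rather than a per-DPLL lock.
 */
static void example_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	mutex_lock(&dev_priv->dpll_lock);
	/* ... program and enable the (possibly shared) PLL registers ... */
	mutex_unlock(&dev_priv->dpll_lock);
}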
......@@ -1893,7 +1903,14 @@ struct drm_i915_private {
u32 fdi_rx_config;
/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
u32 chv_phy_control;
/*
* Shadows for CHV DPLL_MD regs to keep the state
 * checker somewhat working in the presence of hardware
* crappiness (can't read out DPLL_MD for pipes B & C).
*/
u32 chv_dpll_md[I915_MAX_PIPES];
u32 suspend_count;
bool suspended_to_idle;
......@@ -2152,10 +2169,7 @@ struct drm_i915_gem_object {
struct scatterlist *sg;
int last;
} get_page;
/* prime dma-buf support */
void *dma_buf_vmapping;
int vmapping_count;
void *mapping;
/** Breadcrumb of last rendering to the buffer.
* There can only be one writer, but we allow for multiple readers.
......@@ -2732,6 +2746,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
......@@ -2970,12 +2985,44 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
BUG_ON(obj->pages == NULL);
obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages_pin_count == 0);
obj->pages_pin_count--;
}
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
*
* Calls i915_gem_object_pin_pages() to prevent reaping of the object's
* pages and then returns a contiguous mapping of the backing storage into
* the kernel address space.
*
* The caller must hold the struct_mutex.
*
* Returns the pointer through which to access the backing storage.
*/
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
/**
* i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
*
* After pinning the object and mapping its pages, once you are finished
* with your access, call i915_gem_object_unpin_map() to release the pin
* upon the mapping. Once the pin count reaches zero, that mapping may be
* removed.
*
* The caller must hold the struct_mutex.
*/
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
}
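Taken together, pin_map and unpin_map form a bracket around any kernel-space access to the object's backing storage. A minimal hypothetical caller, assuming struct_mutex is already held (none of this is in the diff itself):

/* Hypothetical usage sketch for the pin_map/unpin_map pair above. */
static int example_copy_into_object(struct drm_i915_gem_object *obj,
				    const void *src, size_t len)
{
	void *vaddr;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	vaddr = i915_gem_object_pin_map(obj);	/* pins pages + maps them */
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, src, len);

	i915_gem_object_unpin_map(obj);		/* drops the pages pin */
	return 0;
}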
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to,
......@@ -2999,15 +3046,19 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
u32 seqno = req->engine->get_seqno(req->engine, lazy_coherency);
return i915_seqno_passed(seqno, req->previous_seqno);
if (!lazy_coherency && req->engine->irq_seqno_barrier)
req->engine->irq_seqno_barrier(req->engine);
return i915_seqno_passed(req->engine->get_seqno(req->engine),
req->previous_seqno);
}
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
u32 seqno = req->engine->get_seqno(req->engine, lazy_coherency);
return i915_seqno_passed(seqno, req->seqno);
if (!lazy_coherency && req->engine->irq_seqno_barrier)
req->engine->irq_seqno_barrier(req->engine);
return i915_seqno_passed(req->engine->get_seqno(req->engine),
req->seqno);
}
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
......@@ -3147,13 +3198,9 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
WARN_ON(i915_is_ggtt(vm));
return container_of(vm, struct i915_hw_ppgtt, base);
}
......@@ -3166,7 +3213,10 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
return i915_gem_obj_size(obj, &ggtt->base);
}
static inline int __must_check
......@@ -3174,7 +3224,10 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
unsigned flags)
{
return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
return i915_gem_object_pin(obj, &ggtt->base,
alignment, flags | PIN_GLOBAL);
}
......@@ -3289,6 +3342,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
#define I915_SHRINK_VMAPS 0x10
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
......@@ -3388,6 +3442,8 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
/* intel_opregion.c */
#ifdef CONFIG_ACPI
......
......@@ -95,14 +95,12 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
mutex_lock(&obj->base.dev->struct_mutex);
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
kfree(sg);
mutex_lock(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
mutex_unlock(&obj->base.dev->struct_mutex);
}
......@@ -110,51 +108,17 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
struct sg_page_iter sg_iter;
struct page **pages;
int ret, i;
void *addr;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ERR_PTR(ret);
if (obj->dma_buf_vmapping) {
obj->vmapping_count++;
goto out_unlock;
}
ret = i915_gem_object_get_pages(obj);
if (ret)
goto err;
i915_gem_object_pin_pages(obj);
ret = -ENOMEM;
pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL)
goto err_unpin;
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
pages[i++] = sg_page_iter_page(&sg_iter);
obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
drm_free_large(pages);
if (!obj->dma_buf_vmapping)
goto err_unpin;
obj->vmapping_count = 1;
out_unlock:
addr = i915_gem_object_pin_map(obj);
mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping;
err_unpin:
i915_gem_object_unpin_pages(obj);
err:
mutex_unlock(&dev->struct_mutex);
return ERR_PTR(ret);
return addr;
}
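Stripped of the removed lines, the refactored vmap callback reduces to the following (reconstructed from the surviving lines of the hunk above):

static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	void *addr;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	/* all the open-coded page gathering and vmap() is now done by
	 * i915_gem_object_pin_map(), with per-object refcounting
	 */
	addr = i915_gem_object_pin_map(obj);
	mutex_unlock(&dev->struct_mutex);

	return addr;
}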
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
......@@ -163,12 +127,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
struct drm_device *dev = obj->base.dev;
mutex_lock(&dev->struct_mutex);
if (--obj->vmapping_count == 0) {
vunmap(obj->dma_buf_vmapping);
obj->dma_buf_vmapping = NULL;
i915_gem_object_unpin_pages(obj);
}
i915_gem_object_unpin_map(obj);
mutex_unlock(&dev->struct_mutex);
}
......
......@@ -313,7 +313,8 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
uint64_t target_offset)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint64_t delta = relocation_target(reloc, target_offset);
uint64_t offset;
void __iomem *reloc_page;
......@@ -330,7 +331,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */
offset = i915_gem_obj_ggtt_offset(obj);
offset += reloc->offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
offset & PAGE_MASK);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
......@@ -340,7 +341,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
if (offset_in_page(offset) == 0) {
io_mapping_unmap_atomic(reloc_page);
reloc_page =
io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
io_mapping_map_atomic_wc(ggtt->mappable,
offset);
}
......@@ -1431,7 +1432,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *req = NULL;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
......@@ -1504,7 +1506,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ctx->ppgtt)
vm = &ctx->ppgtt->base;
else
vm = &dev_priv->ggtt.base;
vm = &ggtt->base;
memset(&params_master, 0x00, sizeof(params_master));
......@@ -1781,11 +1783,9 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL;
}
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
exec2_list = drm_malloc_gfp(args->buffer_count,
sizeof(*exec2_list),
GFP_TEMPORARY);
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
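This hunk (and the userptr one further down) replaces the open-coded try-kmalloc-then-vmalloc pattern with drm_malloc_gfp(). The helper itself is defined in drm_mem_util.h and is not shown here; a sketch of its likely shape, inferred from the code it replaces, would be:

/* Sketch (assumption): mirrors the removed pattern of kmalloc with
 * __GFP_NOWARN | __GFP_NORETRY falling back to vmalloc, plus the
 * overflow check from drm_malloc_ab().
 */
static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
{
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;

	if (size * nmemb <= PAGE_SIZE)
		return kmalloc(nmemb * size, gfp);

	if (gfp & __GFP_RECLAIMABLE) {
		void *ptr = kmalloc(nmemb * size,
				    gfp | __GFP_NOWARN | __GFP_NORETRY);
		if (ptr)
			return ptr;
	}

	return __vmalloc(size * nmemb, gfp | __GFP_HIGHMEM, PAGE_KERNEL);
}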
......
......@@ -42,7 +42,7 @@ typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t;
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
......@@ -513,10 +513,9 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
px_dma(ppgtt->base.scratch_pd);
}
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);
int i915_ggtt_init_hw(struct drm_device *dev);
void i915_gem_init_ggtt(struct drm_device *dev);
void i915_ggtt_cleanup_hw(struct drm_device *dev);
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
......
......@@ -28,6 +28,7 @@
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
......@@ -166,6 +167,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
obj->madv != I915_MADV_DONTNEED)
continue;
if (flags & I915_SHRINK_VMAPS &&
!is_vmalloc_addr(obj->mapping))
continue;
if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
continue;
......@@ -246,7 +251,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
if (can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
......@@ -288,35 +293,56 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
return freed;
}
struct shrinker_lock_uninterruptible {
bool was_interruptible;
bool unlock;
};
static bool
i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
struct shrinker_lock_uninterruptible *slu,
int timeout_ms)
{
unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
schedule_timeout_killable(1);
if (fatal_signal_pending(current))
return false;
if (--timeout == 0) {
pr_err("Unable to lock GPU to purge memory.\n");
return false;
}
}
slu->was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
return true;
}
static void
i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
struct shrinker_lock_uninterruptible *slu)
{
dev_priv->mm.interruptible = slu->was_interruptible;
if (slu->unlock)
mutex_unlock(&dev_priv->dev->struct_mutex);
}
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_device *dev = dev_priv->dev;
struct shrinker_lock_uninterruptible slu;
struct drm_i915_gem_object *obj;
unsigned long timeout = msecs_to_jiffies(5000) + 1;
unsigned long pinned, bound, unbound, freed_pages;
bool was_interruptible;
bool unlock;
while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
schedule_timeout_killable(1);
if (fatal_signal_pending(current))
return NOTIFY_DONE;
}
if (timeout == 0) {
pr_err("Unable to purge GPU memory due lock contention.\n");
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
}
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
freed_pages = i915_gem_shrink_all(dev_priv);
dev_priv->mm.interruptible = was_interruptible;
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
* being pointed to by hardware.
......@@ -341,8 +367,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
bound += obj->base.size;
}
if (unlock)
mutex_unlock(&dev->struct_mutex);
i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
......@@ -356,6 +381,29 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
return NOTIFY_DONE;
}
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct shrinker_lock_uninterruptible slu;
unsigned long freed_pages;
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
freed_pages = i915_gem_shrink(dev_priv, -1UL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE |
I915_SHRINK_VMAPS);
i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
/**
* i915_gem_shrinker_init - Initialize i915 shrinker
* @dev_priv: i915 device
......@@ -371,6 +419,9 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}
/**
......@@ -381,6 +432,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
*/
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
unregister_shrinker(&dev_priv->mm.shrinker);
}
......@@ -494,10 +494,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
ret = -ENOMEM;
pinned = 0;
pvec = kmalloc(npages*sizeof(struct page *),
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (pvec == NULL)
pvec = drm_malloc_ab(npages, sizeof(struct page *));
pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
......@@ -634,14 +631,11 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pvec = NULL;
pinned = 0;
if (obj->userptr.mm->mm == current->mm) {
pvec = kmalloc(num_pages*sizeof(struct page *),
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
GFP_TEMPORARY);
if (pvec == NULL) {
pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
if (pvec == NULL) {
__i915_gem_userptr_set_active(obj, false);
return -ENOMEM;
}
__i915_gem_userptr_set_active(obj, false);
return -ENOMEM;
}
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
......
......@@ -296,6 +296,7 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
}
}
err_printf(m, " seqno: 0x%08x\n", ring->seqno);
err_printf(m, " last_seqno: 0x%08x\n", ring->last_seqno);
err_printf(m, " waiting: %s\n", yesno(ring->waiting));
err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
......@@ -627,6 +628,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src,
struct i915_address_space *vm)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_error_object *dst;
struct i915_vma *vma = NULL;
int num_pages;
......@@ -653,7 +655,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
vma && (vma->bound & GLOBAL_BIND) &&
reloc_offset + num_pages * PAGE_SIZE <= dev_priv->ggtt.mappable_end);
reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
......@@ -663,12 +665,13 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
reloc_offset = i915_gem_obj_ggtt_offset(src);
if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->ggtt.mappable_end)
if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
goto unwind;
}
/* Cannot access snooped pages through the aperture */
if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
!HAS_LLC(dev_priv))
goto unwind;
dst->page_count = num_pages;
......@@ -689,7 +692,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
* captures what the GPU read.
*/
s = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
s = io_mapping_map_atomic_wc(ggtt->mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
......@@ -883,7 +886,7 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
if (HAS_VEBOX(dev_priv->dev)) {
if (HAS_VEBOX(dev_priv)) {
ering->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(engine->mmio_base));
ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
......@@ -928,8 +931,9 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->waiting = waitqueue_active(&engine->irq_queue);
ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ering->seqno = engine->get_seqno(engine, false);
ering->acthd = intel_ring_get_active_head(engine);
ering->seqno = engine->get_seqno(engine);
ering->last_seqno = engine->last_submitted_seqno;
ering->start = I915_READ_START(engine);
ering->head = I915_READ_HEAD(engine);
ering->tail = I915_READ_TAIL(engine);
......@@ -1015,7 +1019,8 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *request;
int i, count;
......@@ -1038,7 +1043,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
vm = request->ctx && request->ctx->ppgtt ?
&request->ctx->ppgtt->base :
&dev_priv->ggtt.base;
&ggtt->base;
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
......@@ -1049,7 +1054,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
request->batch_obj,
vm);
if (HAS_BROKEN_CS_TLB(dev_priv->dev))
if (HAS_BROKEN_CS_TLB(dev_priv))
error->ring[i].wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv,
engine->scratch.obj);
......
......@@ -27,9 +27,12 @@
/* Definitions of GuC H/W registers, bits, etc */
#define GUC_STATUS _MMIO(0xc000)
#define GS_RESET_SHIFT 0
#define GS_MIA_IN_RESET (0x01 << GS_RESET_SHIFT)
#define GS_BOOTROM_SHIFT 1
#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
#define GS_BOOTROM_JUMP_PASSED (0x76 << GS_BOOTROM_SHIFT)
#define GS_UKERNEL_SHIFT 8
#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT)
......@@ -37,7 +40,13 @@
#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
#define GS_MIA_SHIFT 16
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
#define GS_MIA_CORE_STATE (0x01 << GS_MIA_SHIFT)
#define GS_MIA_HALT_REQUESTED (0x02 << GS_MIA_SHIFT)
#define GS_MIA_ISR_ENTRY (0x04 << GS_MIA_SHIFT)
#define GS_AUTH_STATUS_SHIFT 30
#define GS_AUTH_STATUS_MASK (0x03 << GS_AUTH_STATUS_SHIFT)
#define GS_AUTH_STATUS_BAD (0x01 << GS_AUTH_STATUS_SHIFT)
#define GS_AUTH_STATUS_GOOD (0x02 << GS_AUTH_STATUS_SHIFT)
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
#define SOFT_SCRATCH_COUNT 16
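For illustration, a hypothetical helper (not part of this diff) decoding the new authentication field from GUC_STATUS:

/* Hypothetical decode of the GS_AUTH_STATUS field added above. */
static bool example_guc_fw_authenticated(struct drm_i915_private *dev_priv)
{
	u32 status = I915_READ(GUC_STATUS);

	return (status & GS_AUTH_STATUS_MASK) == GS_AUTH_STATUS_GOOD;
}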
......
......@@ -1000,6 +1000,7 @@ static void notify_ring(struct intel_engine_cs *engine)
return;
trace_i915_gem_request_notify(engine);
engine->user_interrupts++;
wake_up_all(&engine->irq_queue);
}
......@@ -1218,7 +1219,7 @@ static void ivybridge_parity_work(struct work_struct *work)
i915_reg_t reg;
slice--;
if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
break;
dev_priv->l3_parity.which_slice &= ~(1<<slice);
......@@ -1257,7 +1258,7 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irq(&dev_priv->irq_lock);
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(&dev_priv->irq_lock);
mutex_unlock(&dev_priv->dev->struct_mutex);
......@@ -1323,7 +1324,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
notify_ring(engine);
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
intel_lrc_irq_handler(engine);
tasklet_schedule(&engine->irq_tasklet);
}
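The tasklet scheduled here is the new execlists bottom half from this series; its one-time setup lives in intel_lrc.c and is not part of this hunk, but presumably amounts to:

/* Sketch of the bottom-half wiring this handler relies on; the real
 * tasklet_init() call is in the execlists engine-init path.
 */
tasklet_init(&engine->irq_tasklet,
	     intel_lrc_irq_handler, (unsigned long)engine);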
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
......@@ -1626,7 +1627,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (INTEL_INFO(dev_priv)->gen >= 8)
return;
if (HAS_VEBOX(dev_priv->dev)) {
if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(&dev_priv->engine[VECS]);
......@@ -1828,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
for (;;) {
do {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
......@@ -1856,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
}
} while (0);
enable_rpm_wakeref_asserts(dev_priv);
......@@ -2805,8 +2806,8 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
static bool
ring_idle(struct intel_engine_cs *engine, u32 seqno)
{
return (list_empty(&engine->request_list) ||
i915_seqno_passed(seqno, engine->last_submitted_seqno));
return i915_seqno_passed(seqno,
READ_ONCE(engine->last_submitted_seqno));
}
static bool
......@@ -2828,7 +2829,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct intel_engine_cs *signaller;
if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
if (INTEL_INFO(dev_priv)->gen >= 8) {
for_each_engine(signaller, dev_priv) {
if (engine == signaller)
continue;
......@@ -2941,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
return -1;
if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
return 1;
/* cursory check for an unkickable deadlock */
......@@ -3054,6 +3055,24 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
return HANGCHECK_HUNG;
}
static unsigned kick_waiters(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = to_i915(engine->dev);
unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
if (engine->hangcheck.user_interrupts == user_interrupts &&
!test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
engine->name);
else
DRM_INFO("Fake missed irq on %s\n",
engine->name);
wake_up_all(&engine->irq_queue);
}
return user_interrupts;
}
/*
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track per ring seqno progress and
......@@ -3096,29 +3115,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
for_each_engine_id(engine, dev_priv, id) {
u64 acthd;
u32 seqno;
unsigned user_interrupts;
bool busy = true;
semaphore_clear_deadlocks(dev_priv);
seqno = engine->get_seqno(engine, false);
/* We don't strictly need an irq-barrier here, as we are not
 * serving an interrupt request, but be paranoid in case the
* barrier has side-effects (such as preventing a broken
* cacheline snoop) and so be sure that we can see the seqno
* advance. If the seqno should stick, due to a stale
* cacheline, we would erroneously declare the GPU hung.
*/
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
acthd = intel_ring_get_active_head(engine);
seqno = engine->get_seqno(engine);
/* Reset stuck interrupts between batch advances */
user_interrupts = 0;
if (engine->hangcheck.seqno == seqno) {
if (ring_idle(engine, seqno)) {
engine->hangcheck.action = HANGCHECK_IDLE;
if (waitqueue_active(&engine->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
if (!test_and_set_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings)) {
if (!(dev_priv->gpu_error.test_irq_rings & intel_engine_flag(engine)))
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
engine->name);
else
DRM_INFO("Fake missed irq on %s\n",
engine->name);
wake_up_all(&engine->irq_queue);
}
/* Safeguard against driver failure */
user_interrupts = kick_waiters(engine);
engine->hangcheck.score += BUSY;
} else
busy = false;
......@@ -3169,7 +3192,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
engine->hangcheck.score = 0;
/* Clear head and subunit states on seqno movement */
engine->hangcheck.acthd = 0;
acthd = 0;
memset(engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
......@@ -3177,6 +3200,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
engine->hangcheck.seqno = seqno;
engine->hangcheck.acthd = acthd;
engine->hangcheck.user_interrupts = user_interrupts;
busy_count += busy;
}
......@@ -3500,6 +3524,26 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
PORTA_HOTPLUG_ENABLE;
DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
hotplug, enabled_irqs);
hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
/*
 * For BXT, the invert bit has to be set based on the AOB design
 * for HPD detection logic; update it based on the VBT fields.
*/
if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
hotplug |= BXT_DDIA_HPD_INVERT;
if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
hotplug |= BXT_DDIB_HPD_INVERT;
if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
hotplug |= BXT_DDIC_HPD_INVERT;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
......
......@@ -165,6 +165,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
#define GEN6_GRDOM_VECS (1 << 4)
#define GEN9_GRDOM_GUC (1 << 5)
#define GEN8_GRDOM_MEDIA2 (1 << 7)
#define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228)
......@@ -627,6 +628,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IOSF_PORT_GPIO_SC 0x48
#define IOSF_PORT_GPIO_SUS 0xa8
#define IOSF_PORT_CCU 0xa9
#define CHV_IOSF_PORT_GPIO_N 0x13
#define CHV_IOSF_PORT_GPIO_SE 0x48
#define CHV_IOSF_PORT_GPIO_E 0xa8
#define CHV_IOSF_PORT_GPIO_SW 0xb2
#define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108)
......@@ -791,6 +796,7 @@ enum skl_disp_power_wells {
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_CZ_CLOCK_CONTROL 0x62
#define CCK_GPLL_CLOCK_CONTROL 0x67
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
#define CCK_DISPLAY_REF_CLOCK_CONTROL 0x6c
#define CCK_TRUNK_FORCE_ON (1 << 17)
......@@ -1324,6 +1330,7 @@ enum skl_disp_power_wells {
#define _PORT_CL1CM_DW0_A 0x162000
#define _PORT_CL1CM_DW0_BC 0x6C000
#define PHY_POWER_GOOD (1 << 16)
#define PHY_RESERVED (1 << 7)
#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
_PORT_CL1CM_DW0_A)
......@@ -1783,6 +1790,18 @@ enum skl_disp_power_wells {
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
/* WaClearTdlStateAckDirtyBits */
#define GEN8_STATE_ACK _MMIO(0x20F0)
#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8)
#define GEN9_STATE_ACK_SLICE2 _MMIO(0x2100)
#define GEN9_STATE_ACK_TDL0 (1 << 12)
#define GEN9_STATE_ACK_TDL1 (1 << 13)
#define GEN9_STATE_ACK_TDL2 (1 << 14)
#define GEN9_STATE_ACK_TDL3 (1 << 15)
#define GEN9_SUBSLICE_TDL_ACK_BITS \
(GEN9_STATE_ACK_TDL3 | GEN9_STATE_ACK_TDL2 | \
GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0)
#define GFX_MODE _MMIO(0x2520)
#define GFX_MODE_GEN7 _MMIO(0x229c)
#define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c)
......@@ -4785,6 +4804,10 @@ enum skl_disp_power_wells {
#define CBR_PND_DEADLINE_DISABLE (1<<31)
#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
#define CBR_DPLLBMD_PIPE_C (1<<29)
#define CBR_DPLLBMD_PIPE_B (1<<18)
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
......@@ -6185,6 +6208,7 @@ enum skl_disp_power_wells {
/* digital port hotplug */
#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
#define BXT_DDIA_HPD_INVERT (1 << 27)
#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
......@@ -6200,6 +6224,7 @@ enum skl_disp_power_wells {
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12)
#define BXT_DDIC_HPD_INVERT (1 << 11)
#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
......@@ -6210,6 +6235,7 @@ enum skl_disp_power_wells {
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4)
#define BXT_DDIB_HPD_INVERT (1 << 3)
#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
......@@ -6219,6 +6245,9 @@ enum skl_disp_power_wells {
#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
#define BXT_DDI_HPD_INVERT_MASK (BXT_DDIA_HPD_INVERT | \
BXT_DDIB_HPD_INVERT | \
BXT_DDIC_HPD_INVERT)
#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
#define PORTE_HOTPLUG_ENABLE (1 << 4)
......
......@@ -562,7 +562,7 @@ TRACE_EVENT(i915_gem_request_notify,
TP_fast_assign(
__entry->dev = engine->dev->primary->index;
__entry->ring = engine->id;
__entry->seqno = engine->get_seqno(engine, false);
__entry->seqno = engine->get_seqno(engine);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
......
......@@ -181,8 +181,8 @@ static int vgt_balloon_space(struct drm_mm *mm,
int intel_vgt_balloon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
......@@ -202,19 +202,19 @@ int intel_vgt_balloon(struct drm_device *dev)
DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
unmappable_base, unmappable_size / 1024);
if (mappable_base < ggtt_vm->start ||
mappable_end > dev_priv->ggtt.mappable_end ||
unmappable_base < dev_priv->ggtt.mappable_end ||
unmappable_end > ggtt_vm_end) {
if (mappable_base < ggtt->base.start ||
mappable_end > ggtt->mappable_end ||
unmappable_base < ggtt->mappable_end ||
unmappable_end > ggtt_end) {
DRM_ERROR("Invalid ballooning configuration!\n");
return -EINVAL;
}
/* Unmappable graphic memory ballooning */
if (unmappable_base > dev_priv->ggtt.mappable_end) {
ret = vgt_balloon_space(&ggtt_vm->mm,
if (unmappable_base > ggtt->mappable_end) {
ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[2],
dev_priv->ggtt.mappable_end,
ggtt->mappable_end,
unmappable_base);
if (ret)
......@@ -225,30 +225,30 @@ int intel_vgt_balloon(struct drm_device *dev)
* No need to partition out the last physical page,
* because it is reserved to the guard page.
*/
if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
ret = vgt_balloon_space(&ggtt_vm->mm,
if (unmappable_end < ggtt_end - PAGE_SIZE) {
ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[3],
unmappable_end,
ggtt_vm_end - PAGE_SIZE);
ggtt_end - PAGE_SIZE);
if (ret)
goto err;
}
/* Mappable graphic memory ballooning */
if (mappable_base > ggtt_vm->start) {
ret = vgt_balloon_space(&ggtt_vm->mm,
if (mappable_base > ggtt->base.start) {
ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[0],
ggtt_vm->start, mappable_base);
ggtt->base.start, mappable_base);
if (ret)
goto err;
}
if (mappable_end < dev_priv->ggtt.mappable_end) {
ret = vgt_balloon_space(&ggtt_vm->mm,
if (mappable_end < ggtt->mappable_end) {
ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[1],
mappable_end,
dev_priv->ggtt.mappable_end);
ggtt->mappable_end);
if (ret)
goto err;
......
......@@ -373,7 +373,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
if (WARN_ON(port == PORT_A))
return;
if (HAS_PCH_IBX(dev_priv->dev)) {
if (HAS_PCH_IBX(dev_priv)) {
aud_config = IBX_AUD_CFG(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
......
......@@ -1123,7 +1123,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
}
/* Parse the I_boost config for SKL and above */
if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) {
if (bdb->version >= 196 && child->common.iboost) {
info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
port_name(port), info->dp_boost_level);
......@@ -1241,6 +1241,19 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
*/
memcpy(child_dev_ptr, p_child,
min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
/*
 * copied the full block; now initialize values that are not
 * available in the current version
*/
if (bdb->version < 196) {
/* Set default values for bits added from v196 */
child_dev_ptr->common.iboost = 0;
child_dev_ptr->common.hpd_invert = 0;
}
if (bdb->version < 192)
child_dev_ptr->common.lspcon = 0;
}
return;
}
......@@ -1585,3 +1598,47 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
return false;
}
/**
 * intel_bios_is_port_hpd_inverted - is HPD inverted for @port
 * @dev_priv: i915 device instance
 * @port: port to check
 *
 * Return true if HPD should be inverted for @port.
*/
bool
intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port)
{
int i;
if (WARN_ON_ONCE(!IS_BROXTON(dev_priv)))
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
if (!dev_priv->vbt.child_dev[i].common.hpd_invert)
continue;
switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
case DVO_PORT_DPA:
case DVO_PORT_HDMIA:
if (port == PORT_A)
return true;
break;
case DVO_PORT_DPB:
case DVO_PORT_HDMIB:
if (port == PORT_B)
return true;
break;
case DVO_PORT_DPC:
case DVO_PORT_HDMIC:
if (port == PORT_C)
return true;
break;
default:
break;
}
}
return false;
}
......@@ -92,10 +92,10 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
}
/* Set up the pipe CSC unit. */
static void i9xx_load_csc_matrix(struct drm_crtc *crtc)
static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_crtc_state *crtc_state = crtc->state;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i, pipe = intel_crtc->pipe;
......@@ -203,10 +203,10 @@ static void i9xx_load_csc_matrix(struct drm_crtc *crtc)
/*
* Set up the pipe CSC unit on CherryView.
*/
static void cherryview_load_csc_matrix(struct drm_crtc *crtc)
static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_crtc_state *state = crtc->state;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = to_intel_crtc(crtc)->pipe;
uint32_t mode;
......@@ -252,13 +252,13 @@ static void cherryview_load_csc_matrix(struct drm_crtc *crtc)
I915_WRITE(CGM_PIPE_MODE(pipe), mode);
}
void intel_color_set_csc(struct drm_crtc *crtc)
void intel_color_set_csc(struct drm_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc_state->crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->display.load_csc_matrix)
dev_priv->display.load_csc_matrix(crtc);
dev_priv->display.load_csc_matrix(crtc_state);
}
/* Loads the legacy palette/gamma unit for the CRTC. */
......@@ -303,19 +303,20 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
}
}
static void i9xx_load_luts(struct drm_crtc *crtc)
static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
{
i9xx_load_luts_internal(crtc, crtc->state->gamma_lut);
i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut);
}
/* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
static void haswell_load_luts(struct drm_crtc *crtc)
static void haswell_load_luts(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *intel_crtc_state =
to_intel_crtc_state(crtc->state);
to_intel_crtc_state(crtc_state);
bool reenable_ips = false;
/*
......@@ -331,24 +332,24 @@ static void haswell_load_luts(struct drm_crtc *crtc)
intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
i9xx_load_luts(crtc);
i9xx_load_luts(crtc_state);
if (reenable_ips)
hsw_enable_ips(intel_crtc);
}
/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
static void broadwell_load_luts(struct drm_crtc *crtc)
static void broadwell_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_crtc_state *state = crtc->state;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
if (crtc_state_is_legacy(state)) {
haswell_load_luts(crtc);
haswell_load_luts(state);
return;
}
......@@ -421,11 +422,11 @@ static void broadwell_load_luts(struct drm_crtc *crtc)
}
/* Loads the palette/gamma unit for the CRTC on CherryView. */
static void cherryview_load_luts(struct drm_crtc *crtc)
static void cherryview_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc_state *state = crtc->state;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_color_lut *lut;
uint32_t i, lut_size;
......@@ -481,16 +482,12 @@ static void cherryview_load_luts(struct drm_crtc *crtc)
i9xx_load_luts_internal(crtc, NULL);
}
void intel_color_load_luts(struct drm_crtc *crtc)
void intel_color_load_luts(struct drm_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc_state->crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
/* The clocks have to be on to load the palette. */
if (!crtc->state->active)
return;
dev_priv->display.load_luts(crtc);
dev_priv->display.load_luts(crtc_state);
}
int intel_color_check(struct drm_crtc *crtc,
......
......@@ -315,6 +315,9 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
*dig_port = enc_to_mst(encoder)->primary;
*port = (*dig_port)->port;
break;
default:
WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
/* fallthrough and treat as unknown */
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
......@@ -326,9 +329,6 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
*dig_port = NULL;
*port = PORT_E;
break;
default:
WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
break;
}
}
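Reordering the switch like this lets the default arm warn about an unknown encoder type and then fall through into the normal DP/eDP/HDMI handling rather than leaving the caller's out-parameters unset. A small self-contained sketch of the idiom (enum values hypothetical):

#include <stdio.h>

enum output_type { OUTPUT_DP, OUTPUT_EDP, OUTPUT_HDMI };

/* The default arm sits first so an unknown type is reported and then
 * falls through into the common handling. */
static int type_to_port(int type)
{
	switch (type) {
	default:
		fprintf(stderr, "invalid encoder type %d\n", type);
		/* fallthrough and treat as unknown */
	case OUTPUT_DP:
	case OUTPUT_EDP:
	case OUTPUT_HDMI:
		return 0; /* derive the port the usual way */
	}
}

int main(void)
{
	printf("port %d\n", type_to_port(99)); /* warns, then port 0 */
	return 0;
}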
......@@ -629,6 +629,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
break;
}
rx_ctl_val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
POSTING_READ(FDI_RX_CTL(PIPE_A));
temp = I915_READ(DDI_BUF_CTL(PORT_E));
temp &= ~DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
......@@ -643,10 +647,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
rx_ctl_val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
POSTING_READ(FDI_RX_CTL(PIPE_A));
/* Reset FDI_RX_MISC pwrdn lanes */
temp = I915_READ(FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
......@@ -1726,18 +1726,31 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
enum port port;
uint32_t val;
u32 ports, val;
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val |= GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
/* Considering 10ms timeout until BSpec is updated */
if (wait_for(I915_READ(BXT_PORT_CL1CM_DW0(phy)) & PHY_POWER_GOOD, 10))
/*
* The PHY registers start out inaccessible and respond to reads with
* all 1s. Eventually they become accessible as they power up, then
* the reserved bit will give the default 0. Poll on the reserved bit
* becoming 0 to find when the PHY is accessible.
* The HW team confirmed that the time to reach PHY power-good status
* is anywhere between 50 us and 100 us.
*/
if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
(PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
DRM_ERROR("timeout during PHY%d power on\n", phy);
}
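The polling idiom the comment describes can be sketched outside the driver as a mask/value wait: the reserved bit is included in the mask so an all-1s read from a still-inaccessible PHY does not look like success. This standalone simulation uses made-up bit positions and a fake register in place of I915_READ() and wait_for_us():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PHY_RESERVED   (1u << 31)	/* made-up bit position */
#define PHY_POWER_GOOD (1u << 25)	/* made-up bit position */

/* Fake register: reads all 1s while the PHY is inaccessible, then the
 * reserved bit drops to its default 0 and power-good is set. */
static uint32_t read_phy_reg(void)
{
	static int reads;
	return ++reads < 60 ? 0xffffffffu : PHY_POWER_GOOD;
}

/* Mask/value poll: an inaccessible PHY reads power-good as 1 too, so
 * success is the exact pattern "reserved clear, power-good set". */
static bool wait_phy_power_good(unsigned int timeout_us)
{
	while (timeout_us--) {
		uint32_t val = read_phy_reg();

		if ((val & (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD)
			return true;
		/* the driver sleeps ~1 us here between polls */
	}
	return false;
}

int main(void)
{
	printf("power good: %s\n", wait_phy_power_good(100) ? "yes" : "no");
	return 0;
}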
if (phy == DPIO_PHY0)
ports = BIT(PORT_B) | BIT(PORT_C);
else
ports = BIT(PORT_A);
for (port = (phy == DPIO_PHY0 ? PORT_B : PORT_A);
port <= (phy == DPIO_PHY0 ? PORT_C : PORT_A); port++) {
for_each_port_masked(port, ports) {
int lane;
for (lane = 0; lane < 4; lane++) {
......@@ -1898,12 +1911,18 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
uint32_t val;
intel_ddi_post_disable(intel_encoder);
/*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
* and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
val = I915_READ(FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
intel_ddi_post_disable(intel_encoder);
val = I915_READ(FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
......
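Both FDI hunks above move the same read-modify-write of FDI_RX_CTL so the receiver is disabled earlier in the Bspec sequence. A standalone sketch of the read/clear/write/posting-read idiom they use, with a simulated register and an illustrative bit position:

#include <stdint.h>
#include <stdio.h>

#define FDI_RX_ENABLE (1u << 31)	/* illustrative bit position */

static uint32_t fdi_rx_ctl = 0x80001234u;	/* simulated register */

static uint32_t reg_read(void) { return fdi_rx_ctl; }
static void reg_write(uint32_t v) { fdi_rx_ctl = v; }

int main(void)
{
	/* Read the register, clear only the enable bit, write it back,
	 * then read once more (POSTING_READ in i915) so the write is
	 * flushed before the next step of the sequence executes. */
	uint32_t val = reg_read();

	val &= ~FDI_RX_ENABLE;
	reg_write(val);
	(void)reg_read();	/* posting read */

	printf("FDI_RX_CTL = 0x%08x\n", (unsigned int)fdi_rx_ctl);
	return 0;
}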
......@@ -89,14 +89,16 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
if (WARN_ON(pll == NULL))
return;
mutex_lock(&dev_priv->dpll_lock);
WARN_ON(!pll->config.crtc_mask);
if (pll->active_mask == 0) {
if (!pll->active_mask) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
WARN_ON(pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
pll->funcs.mode_set(dev_priv, pll);
}
mutex_unlock(&dev_priv->dpll_lock);
}
/**
......@@ -113,14 +115,17 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
unsigned old_mask = pll->active_mask;
unsigned old_mask;
if (WARN_ON(pll == NULL))
return;
mutex_lock(&dev_priv->dpll_lock);
old_mask = pll->active_mask;
if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) ||
WARN_ON(pll->active_mask & crtc_mask))
return;
goto out;
pll->active_mask |= crtc_mask;
......@@ -131,13 +136,16 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
if (old_mask) {
WARN_ON(!pll->on);
assert_shared_dpll_enabled(dev_priv, pll);
return;
goto out;
}
WARN_ON(pll->on);
DRM_DEBUG_KMS("enabling %s\n", pll->name);
pll->funcs.enable(dev_priv, pll);
pll->on = true;
out:
mutex_unlock(&dev_priv->dpll_lock);
}
void intel_disable_shared_dpll(struct intel_crtc *crtc)
......@@ -154,8 +162,9 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
if (pll == NULL)
return;
mutex_lock(&dev_priv->dpll_lock);
if (WARN_ON(!(pll->active_mask & crtc_mask)))
return;
goto out;
DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
pll->name, pll->active_mask, pll->on,
......@@ -166,11 +175,14 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
pll->active_mask &= ~crtc_mask;
if (pll->active_mask)
return;
goto out;
DRM_DEBUG_KMS("disabling %s\n", pll->name);
pll->funcs.disable(dev_priv, pll);
pll->on = false;
out:
mutex_unlock(&dev_priv->dpll_lock);
}
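Now that the shared-DPLL enable/disable paths run under the new dpll_lock, every early return becomes a goto out so the mutex is dropped on all paths. A minimal runnable sketch of that conversion, using a plain pthread mutex in place of the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dpll_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int active_mask = 0x3;

/* Every early "return" in the locked region becomes "goto out" so the
 * mutex is released on all paths. */
static void disable_pll(unsigned int crtc_mask)
{
	pthread_mutex_lock(&dpll_lock);

	if (!(active_mask & crtc_mask))
		goto out;	/* was: return -- would leak the lock */

	active_mask &= ~crtc_mask;
	if (active_mask)
		goto out;	/* other crtcs still use the PLL */

	printf("disabling PLL\n");
out:
	pthread_mutex_unlock(&dpll_lock);
}

int main(void)
{
	disable_pll(0x1);	/* PLL stays on, crtc 1 still active */
	disable_pll(0x2);	/* last user gone: "disabling PLL" */
	return 0;
}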
static struct intel_shared_dpll *
......@@ -286,7 +298,7 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
u32 val;
bool enabled;
I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
......@@ -1284,7 +1296,15 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_REF_SEL;
/*
* The definition of the bit's polarity changed after the
* A1 stepping.
*/
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
temp &= ~PORT_PLL_REF_SEL;
else
temp |= PORT_PLL_REF_SEL;
/* Non-SSC reference */
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
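A compact way to see the revid gating above: the same register field needs opposite programming before and after the A1 stepping. Sketch with a hypothetical bit position and a plain integer revid standing in for IS_BXT_REVID():

#include <stdint.h>
#include <stdio.h>

#define PORT_PLL_REF_SEL (1u << 27)	/* illustrative bit position */
#define BXT_REVID_A1 1

/* Clear the bit for a non-SSC reference up to A1, set it from B0 on. */
static uint32_t pick_ref_sel(uint32_t temp, int revid)
{
	if (revid <= BXT_REVID_A1)
		temp &= ~PORT_PLL_REF_SEL;
	else
		temp |= PORT_PLL_REF_SEL;
	return temp;
}

int main(void)
{
	printf("A1:  0x%08x\n", (unsigned int)pick_ref_sel(0, 1));
	printf("B0+: 0x%08x\n", (unsigned int)pick_ref_sel(0, 2));
	return 0;
}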
......@@ -1750,6 +1770,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
dev_priv->dpll_mgr = dpll_mgr;
dev_priv->num_shared_dpll = i;
mutex_init(&dev_priv->dpll_lock);
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
......
......@@ -796,7 +796,9 @@ struct intel_dp {
uint32_t DP;
int link_rate;
uint8_t lane_count;
uint8_t sink_count;
bool has_audio;
bool detect_done;
enum hdmi_force_audio force_audio;
bool limited_color_range;
bool color_range_auto;
......@@ -1102,6 +1104,8 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
extern const struct drm_plane_funcs intel_plane_funcs;
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
......@@ -1669,7 +1673,7 @@ extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
/* intel_color.c */
void intel_color_init(struct drm_crtc *crtc);
int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
void intel_color_set_csc(struct drm_crtc *crtc);
void intel_color_load_luts(struct drm_crtc *crtc);
void intel_color_set_csc(struct drm_crtc_state *crtc_state);
void intel_color_load_luts(struct drm_crtc_state *crtc_state);
#endif /* __INTEL_DRV_H__ */
......@@ -46,6 +46,24 @@ static const struct {
},
};
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
{
/* It just so happens the VBT matches register contents. */
switch (fmt) {
case VID_MODE_FORMAT_RGB888:
return MIPI_DSI_FMT_RGB888;
case VID_MODE_FORMAT_RGB666:
return MIPI_DSI_FMT_RGB666;
case VID_MODE_FORMAT_RGB666_PACKED:
return MIPI_DSI_FMT_RGB666_PACKED;
case VID_MODE_FORMAT_RGB565:
return MIPI_DSI_FMT_RGB565;
default:
MISSING_CASE(fmt);
return MIPI_DSI_FMT_RGB666;
}
}
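Since pixel_format_from_register_bits() is now shared between the VBT parser and the hardware-readout path, a decode usually pairs it with a format-to-bpp lookup. A self-contained sketch of that pairing; the register encodings and bpp values here are illustrative, not the real VID_MODE_FORMAT_* definitions:

#include <stdio.h>

enum dsi_fmt { FMT_RGB888, FMT_RGB666, FMT_RGB666_PACKED, FMT_RGB565 };

/* Illustrative register encodings; not the real VID_MODE_FORMAT_*. */
#define REG_RGB888 (1u << 7)
#define REG_RGB666 (2u << 7)
#define REG_RGB565 (3u << 7)

static enum dsi_fmt fmt_from_reg(unsigned int bits)
{
	switch (bits) {
	case REG_RGB888: return FMT_RGB888;
	case REG_RGB666: return FMT_RGB666;
	case REG_RGB565: return FMT_RGB565;
	default:         return FMT_RGB666; /* safe fallback */
	}
}

static int fmt_to_bpp(enum dsi_fmt fmt)
{
	switch (fmt) {
	case FMT_RGB888: return 24;
	case FMT_RGB565: return 16;
	default:         return 18; /* both RGB666 variants */
	}
}

int main(void)
{
	/* The VBT keeps the format in low bits; shifting by 7 lines it
	 * up with the register layout, letting one helper serve both
	 * the VBT parser and the hardware readout. */
	unsigned int vbt_color_format = 1; /* hypothetical value */

	printf("bpp = %d\n", fmt_to_bpp(fmt_from_reg(vbt_color_format << 7)));
	return 0;
}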
static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
......@@ -740,14 +758,74 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
return active;
}
static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
unsigned int bpp, fmt;
enum port port;
u16 vfp, vsync, vbp;
/*
* At least one port is active, as encoder->get_config() is called
* only if encoder->get_hw_state() returns true.
*/
for_each_dsi_port(port, intel_dsi->ports) {
if (I915_READ(BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
break;
}
fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
pipe_config->pipe_bpp =
mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
bpp = pipe_config->pipe_bpp;
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =
I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
adjusted_mode->crtc_vdisplay =
I915_READ(BXT_MIPI_TRANS_VACTIVE(port));
adjusted_mode->crtc_vtotal =
I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
/*
* TODO: Retrieve hfp, hsync and hbp. Adjust them for dual link and
* calculate hsync_start, hsync_end, htotal and hblank_end
*/
/* vertical values are in terms of lines */
vfp = I915_READ(MIPI_VFP_COUNT(port));
vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
vbp = I915_READ(MIPI_VBP_COUNT(port));
adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
adjusted_mode->crtc_vsync_start =
vfp + adjusted_mode->crtc_vdisplay;
adjusted_mode->crtc_vsync_end =
vsync + adjusted_mode->crtc_vsync_start;
adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
}
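The vertical-timing readout above reconstructs sync and blank boundaries from the front-porch and sync counters. A worked numeric example with made-up 1080p-ish values (in the driver, vtotal is read from its own register rather than computed):

#include <stdio.h>

int main(void)
{
	/* Counter values as read from the hardware, in lines. */
	int vdisplay = 1080, vfp = 4, vsync = 5, vtotal = 1125;

	int vsync_start  = vfp + vdisplay;	/* 1084 */
	int vsync_end    = vsync + vsync_start;	/* 1089 */
	int vblank_start = vdisplay;		/* 1080 */
	int vblank_end   = vtotal;		/* 1125 */

	printf("vsync %d..%d, vblank %d..%d, vtotal %d\n",
	       vsync_start, vsync_end, vblank_start, vblank_end, vtotal);
	return 0;
}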
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
u32 pclk;
DRM_DEBUG_KMS("\n");
pipe_config->has_dsi_encoder = true;
if (IS_BROXTON(dev))
bxt_dsi_get_pipe_config(encoder, pipe_config);
/*
* DPLL_MD is not used in the DSI case; reading it returns some default
* value, so set dpll_md = 0.
......
......@@ -134,5 +134,6 @@ extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
#endif /* _INTEL_DSI_H */
......@@ -58,50 +58,41 @@ static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
#define NS_KHZ_RATIO 1000000
#define GPI0_NC_0_HV_DDI0_HPD 0x4130
#define GPIO_NC_0_HV_DDI0_PAD 0x4138
#define GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD 0x4128
#define GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD 0x4118
#define GPIO_NC_3_PANEL0_VDDEN 0x4140
#define GPIO_NC_3_PANEL0_VDDEN_PAD 0x4148
#define GPIO_NC_4_PANEL0_BLKEN 0x4150
#define GPIO_NC_4_PANEL0_BLKEN_PAD 0x4158
#define GPIO_NC_5_PANEL0_BLKCTL 0x4160
#define GPIO_NC_5_PANEL0_BLKCTL_PAD 0x4168
#define GPIO_NC_6_PCONF0 0x4180
#define GPIO_NC_6_PAD 0x4188
#define GPIO_NC_7_PCONF0 0x4190
#define GPIO_NC_7_PAD 0x4198
#define GPIO_NC_8_PCONF0 0x4170
#define GPIO_NC_8_PAD 0x4178
#define GPIO_NC_9_PCONF0 0x4100
#define GPIO_NC_9_PAD 0x4108
#define GPIO_NC_10_PCONF0 0x40E0
#define GPIO_NC_10_PAD 0x40E8
#define GPIO_NC_11_PCONF0 0x40F0
#define GPIO_NC_11_PAD 0x40F8
struct gpio_table {
u16 function_reg;
u16 pad_reg;
u8 init;
/* base offsets for gpio pads */
#define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130
#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
#define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
#define VLV_GPIO_NC_3_PANEL0_VDDEN 0x4140
#define VLV_GPIO_NC_4_PANEL0_BKLTEN 0x4150
#define VLV_GPIO_NC_5_PANEL0_BKLTCTL 0x4160
#define VLV_GPIO_NC_6_HV_DDI1_HPD 0x4180
#define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA 0x4190
#define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL 0x4170
#define VLV_GPIO_NC_9_PANEL1_VDDEN 0x4100
#define VLV_GPIO_NC_10_PANEL1_BKLTEN 0x40E0
#define VLV_GPIO_NC_11_PANEL1_BKLTCTL 0x40F0
#define VLV_GPIO_PCONF0(base_offset) (base_offset)
#define VLV_GPIO_PAD_VAL(base_offset) ((base_offset) + 8)
struct gpio_map {
u16 base_offset;
bool init;
};
static struct gpio_table gtable[] = {
{ GPI0_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 },
{ GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 },
{ GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 },
{ GPIO_NC_3_PANEL0_VDDEN, GPIO_NC_3_PANEL0_VDDEN_PAD, 0 },
{ GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 },
{ GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 },
{ GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 },
{ GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 },
{ GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 },
{ GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 },
{ GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0},
{ GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
static struct gpio_map vlv_gpio_table[] = {
{ VLV_GPIO_NC_0_HV_DDI0_HPD },
{ VLV_GPIO_NC_1_HV_DDI0_DDC_SDA },
{ VLV_GPIO_NC_2_HV_DDI0_DDC_SCL },
{ VLV_GPIO_NC_3_PANEL0_VDDEN },
{ VLV_GPIO_NC_4_PANEL0_BKLTEN },
{ VLV_GPIO_NC_5_PANEL0_BKLTCTL },
{ VLV_GPIO_NC_6_HV_DDI1_HPD },
{ VLV_GPIO_NC_7_HV_DDI1_DDC_SDA },
{ VLV_GPIO_NC_8_HV_DDI1_DDC_SCL },
{ VLV_GPIO_NC_9_PANEL1_VDDEN },
{ VLV_GPIO_NC_10_PANEL1_BKLTEN },
{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
};
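The table rework drops one register per pad: PCONF0 lives at the base offset and the pad-value register always sits 8 bytes above it, so a single base_offset field replaces the old function/pad register pair. A runnable sketch of the macro pattern (only two sample entries shown):

#include <stdio.h>

#define GPIO_PCONF0(base)  (base)
#define GPIO_PAD_VAL(base) ((base) + 8)

struct gpio_map { unsigned short base_offset; };

static const struct gpio_map table[] = {
	{ 0x4130 },	/* HV_DDI0_HPD */
	{ 0x4120 },	/* HV_DDI0_DDC_SDA */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("pconf0=0x%04x padval=0x%04x\n",
		       GPIO_PCONF0(table[i].base_offset),
		       GPIO_PAD_VAL(table[i].base_offset));
	return 0;
}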
static inline enum port intel_dsi_seq_port_to_port(u8 port)
......@@ -196,56 +187,76 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
u8 gpio_source, u8 gpio_index, bool value)
{
u8 gpio, action;
u16 function, pad;
u32 val;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->vbt.dsi.seq_version >= 3)
data++;
struct gpio_map *map;
u16 pconf0, padval;
u32 tmp;
u8 port;
gpio = *data++;
/* pull up/down */
action = *data++ & 1;
if (gpio >= ARRAY_SIZE(gtable)) {
DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
goto out;
if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index);
return;
}
if (!IS_VALLEYVIEW(dev_priv)) {
DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
goto out;
}
map = &vlv_gpio_table[gpio_index];
if (dev_priv->vbt.dsi.seq_version >= 3) {
DRM_DEBUG_KMS("GPIO element v3 not supported\n");
goto out;
return;
} else {
if (gpio_source == 0) {
port = IOSF_PORT_GPIO_NC;
} else if (gpio_source == 1) {
port = IOSF_PORT_GPIO_SC;
} else {
DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
return;
}
}
function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg;
pconf0 = VLV_GPIO_PCONF0(map->base_offset);
padval = VLV_GPIO_PAD_VAL(map->base_offset);
mutex_lock(&dev_priv->sb_lock);
if (!gtable[gpio].init) {
/* program the function */
if (!map->init) {
/* FIXME: remove constant below */
vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function,
0x2000CC00);
gtable[gpio].init = 1;
vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
map->init = true;
}
val = 0x4 | action;
tmp = 0x4 | value;
vlv_iosf_sb_write(dev_priv, port, padval, tmp);
mutex_unlock(&dev_priv->sb_lock);
}
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u8 gpio_source, gpio_index;
bool value;
if (dev_priv->vbt.dsi.seq_version >= 3)
data++;
gpio_index = *data++;
/* gpio source in sequence v2 only */
if (dev_priv->vbt.dsi.seq_version == 2)
gpio_source = (*data >> 1) & 3;
else
gpio_source = 0;
/* pull up/down */
vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val);
mutex_unlock(&dev_priv->sb_lock);
value = *data++ & 1;
if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
else
DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
out:
return data;
}
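After the split, mipi_exec_gpio() only parses the sequence bytes and delegates the register work to a per-platform helper such as vlv_exec_gpio(). A standalone sketch of the byte decode as the code above performs it: v3+ sequences carry an extra leading byte, and only v2 encodes a GPIO source in bits 2:1 of the flags byte:

#include <stdio.h>

/* Decode one GPIO element: extra leading byte in v3+, source field in
 * bits 2:1 only in v2, pull up/down value in bit 0. */
static const unsigned char *exec_gpio(int seq_version,
				      const unsigned char *data)
{
	unsigned int gpio_index, gpio_source, value;

	if (seq_version >= 3)
		data++;			/* skip the extra byte */

	gpio_index = *data++;

	if (seq_version == 2)
		gpio_source = (*data >> 1) & 3;
	else
		gpio_source = 0;

	value = *data++ & 1;		/* pull up/down */

	printf("gpio %u source %u -> %u\n", gpio_index, gpio_source, value);
	return data;
}

int main(void)
{
	const unsigned char seq[] = { 5, 0x03 }; /* index 5, source 1, high */

	exec_gpio(2, seq);
	return 0;
}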
......@@ -412,25 +423,6 @@ static const struct drm_panel_funcs vbt_panel_funcs = {
.get_modes = vbt_panel_get_modes,
};
/* XXX: This should be done when parsing the VBT in intel_bios.c */
static enum mipi_dsi_pixel_format pixel_format_from_vbt(u32 fmt)
{
/* It just so happens the VBT matches register contents. */
switch (fmt) {
case VID_MODE_FORMAT_RGB888:
return MIPI_DSI_FMT_RGB888;
case VID_MODE_FORMAT_RGB666:
return MIPI_DSI_FMT_RGB666;
case VID_MODE_FORMAT_RGB666_PACKED:
return MIPI_DSI_FMT_RGB666_PACKED;
case VID_MODE_FORMAT_RGB565:
return MIPI_DSI_FMT_RGB565;
default:
MISSING_CASE(fmt);
return MIPI_DSI_FMT_RGB666;
}
}
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
......@@ -455,7 +447,9 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format = pixel_format_from_vbt(mipi_config->videomode_color_format << 7);
intel_dsi->pixel_format =
pixel_format_from_register_bits(
mipi_config->videomode_color_format << 7);
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
intel_dsi->dual_link = mipi_config->dual_link;
......
......@@ -506,6 +506,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
int size,
int fb_cpp)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int compression_threshold = 1;
int ret;
u64 end;
......@@ -516,9 +517,9 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
end = dev_priv->ggtt.stolen_size - 8 * 1024 * 1024;
end = ggtt->stolen_size - 8 * 1024 * 1024;
else
end = dev_priv->ggtt.stolen_usable_size;
end = ggtt->stolen_usable_size;
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
......
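A worked example of the allocation-limit choice above, with made-up stolen-memory sizes: on BDW and SKL/KBL the top 8 MiB of stolen memory is kept free of FBC to avoid underruns, otherwise the whole usable range is available:

#include <stdio.h>

int main(void)
{
	/* Made-up sizes: 64 MiB of stolen memory, 60 MiB of it usable. */
	unsigned long long stolen_size = 64ull << 20;
	unsigned long long stolen_usable_size = 60ull << 20;
	int is_bdw_skl_kbl = 1;
	unsigned long long end;

	if (is_bdw_skl_kbl)
		end = stolen_size - (8ull << 20); /* keep top 8 MiB free */
	else
		end = stolen_usable_size;

	printf("FBC allocations end at %llu MiB\n", end >> 20);
	return 0;
}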
......@@ -122,6 +122,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj = NULL;
int size, ret;
......@@ -146,7 +147,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
if (size * 2 < dev_priv->ggtt.stolen_usable_size)
if (size * 2 < ggtt->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, size);
......@@ -181,7 +182,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
container_of(helper, struct intel_fbdev, helper);
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
......@@ -244,13 +246,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->ggtt.mappable_end;
info->apertures->ranges[0].size = ggtt->mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base =
ioremap_wc(dev_priv->ggtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
size);
if (!info->screen_base) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
......@@ -808,8 +810,6 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
async_synchronize_full();
if (dev_priv->fbdev)
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
......@@ -821,7 +821,6 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
struct intel_fbdev *ifbdev = dev_priv->fbdev;
struct drm_fb_helper *fb_helper;
async_synchronize_full();
if (!ifbdev)
return;
......
......@@ -333,7 +333,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
old = !intel_crtc->pch_fifo_underrun_disabled;
intel_crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev_priv->dev))
if (HAS_PCH_IBX(dev_priv))
ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
enable);
else
......@@ -363,7 +363,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
return;
/* GMCH can't disable fifo underruns, filter them. */
if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
if (HAS_GMCH_DISPLAY(dev_priv) &&
to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
return;
......
......@@ -353,6 +353,24 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
return ret;
}
static int i915_reset_guc(struct drm_i915_private *dev_priv)
{
int ret;
u32 guc_status;
ret = intel_guc_reset(dev_priv);
if (ret) {
DRM_ERROR("GuC reset failed, ret = %d\n", ret);
return ret;
}
guc_status = I915_READ(GUC_STATUS);
WARN(!(guc_status & GS_MIA_IN_RESET),
"GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);
return ret;
}
/**
* intel_guc_ucode_load() - load GuC uCode into the device
* @dev: drm device
......@@ -369,7 +387,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
int err = 0;
int retries, err = 0;
if (!i915.enable_guc_submission)
return 0;
......@@ -417,9 +435,33 @@ int intel_guc_ucode_load(struct drm_device *dev)
if (err)
goto fail;
err = guc_ucode_xfer(dev_priv);
if (err)
goto fail;
/*
* WaEnableuKernelHeaderValidFix:skl,bxt
* For BXT this applies only up to B0, but the WA below is required
* for later steppings as well, so it is extended too.
*/
/* WaEnableGuCBootHashCheckNotSet:skl,bxt */
for (retries = 3; ; ) {
/*
* Always reset the GuC just before (re)loading, so
* that the state and timing are fairly predictable
*/
err = i915_reset_guc(dev_priv);
if (err) {
DRM_ERROR("GuC reset failed, err %d\n", err);
goto fail;
}
err = guc_ucode_xfer(dev_priv);
if (!err)
break;
if (--retries == 0)
goto fail;
DRM_INFO("GuC fw load failed, err %d; will reset and "
"retry %d more time(s)\n", err, retries);
}
guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
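The retry loop bounds the GuC load at three attempts, resetting before each one so state and timing stay predictable. A runnable simulation of the same control flow, with stub functions standing in for i915_reset_guc() and guc_ucode_xfer() (the stub transfer succeeds on the third try):

#include <stdio.h>

static int attempts;

/* Stubs for the reset and firmware-transfer steps. */
static int reset_guc(void)  { return 0; }
static int ucode_xfer(void) { return ++attempts < 3 ? -5 : 0; }

int main(void)
{
	int retries, err;

	for (retries = 3; ; ) {
		/* Always reset before each (re)load so state and
		 * timing are predictable. */
		err = reset_guc();
		if (err)
			return err;

		err = ucode_xfer();
		if (!err)
			break;

		if (--retries == 0)
			return err;

		fprintf(stderr, "load failed (%d); retrying %d more time(s)\n",
			err, retries);
	}

	printf("GuC firmware loaded after %d attempt(s)\n", attempts);
	return 0;
}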
......@@ -440,6 +482,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
return 0;
fail:
DRM_ERROR("GuC firmware load failed, err %d\n", err);
if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
......
......@@ -638,7 +638,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv->dev))
else if (HAS_PCH_SPLIT(dev_priv))
reg = TVIDEO_DIP_GCP(crtc->pipe);
else
return false;
......
......@@ -118,7 +118,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
void intel_lrc_irq_handler(struct intel_engine_cs *engine);
void intel_execlists_retire_requests(struct intel_engine_cs *engine);
#endif /* _INTEL_LRC_H_ */