Commit 35eecf05 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Definitely seems quieter this week,

  Radeon, intel, intel broadwell, vmwgfx, ttm, armada, and a couple of
  core fixes, one revert in radeon

  Most of these are either going to stable or fixes for things
  introduced in the merge window"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (30 commits)
  drm/edid: add quirk for BPC in Samsung NP700G7A-S01PL notebook
  drm/ttm: Fix accesses through vmas with only partial coverage
  drm/nouveau: only runtime suspend by default in optimus configuration
  drm: don't double-free on driver load error
  Revert "drm/radeon: Implement radeon_pci_shutdown"
  drm/radeon: add missing display tiling setup for oland
  drm/radeon: fix typo in cik_copy_dma
  drm/radeon/cik: plug in missing blit callback
  drm/radeon/dpm: Fix hwmon crash
  drm/radeon: Fix sideport problems on certain RS690 boards
  drm/i915: don't update the dri1 breadcrumb with modesetting
  DRM: Armada: prime refcounting bug fix
  DRM: Armada: fix printing of phys_addr_t/dma_addr_t
  DRM: Armada: destroy framebuffer after helper
  DRM: Armada: implement lastclose() for fbhelper
  drm/i915: Repeat eviction search after idling the GPU
  drm/vmwgfx: Add max surface memory param
  drm/i915: Fix use-after-free in do_switch
  drm/i915: fix pm init ordering
  drm/i915: Hold mutex across i915_gem_release
  ...
parents 8b803841 49d45a31
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
 extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
 
 int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_lastclose(struct drm_device *);
 void armada_fbdev_fini(struct drm_device *);
 int armada_overlay_plane_create(struct drm_device *, unsigned long);
...
@@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
 		DRM_UNLOCKED),
 };
 
+static void armada_drm_lastclose(struct drm_device *dev)
+{
+	armada_fbdev_lastclose(dev);
+}
+
 static const struct file_operations armada_drm_fops = {
 	.owner			= THIS_MODULE,
 	.llseek			= no_llseek,
@@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = {
 	.open			= NULL,
 	.preclose		= NULL,
 	.postclose		= NULL,
-	.lastclose		= NULL,
+	.lastclose		= armada_drm_lastclose,
 	.unload			= armada_drm_unload,
 	.get_vblank_counter	= drm_vblank_count,
 	.enable_vblank		= armada_drm_enable_vblank,
...
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
 	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
 	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
 
-	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
-		dfb->fb.width, dfb->fb.height,
-		dfb->fb.bits_per_pixel, obj->phys_addr);
+	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
+		dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
+		(unsigned long long)obj->phys_addr);
 
 	return 0;
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
 	return ret;
 }
 
+void armada_fbdev_lastclose(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	drm_modeset_lock_all(dev);
+	if (priv->fbdev)
+		drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+	drm_modeset_unlock_all(dev);
+}
+
 void armada_fbdev_fini(struct drm_device *dev)
 {
 	struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
 			framebuffer_release(info);
 		}
 
-		drm_fb_helper_fini(fbh);
-
 		if (fbh->fb)
 			fbh->fb->funcs->destroy(fbh->fb);
 
+		drm_fb_helper_fini(fbh);
+
 		priv->fbdev = NULL;
 	}
 }
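
The DRM_DEBUG_KMS change above is the "fix printing of phys_addr_t/dma_addr_t" item from the pull: phys_addr_t is 32-bit on plain ARM but 64-bit once LPAE is enabled, so handing it to a plain %x conversion is broken on half the configurations. A minimal userspace sketch of the portable idiom (uint64_t stands in for phys_addr_t here; kernels of this era can also use the %pa printk specifier for the same purpose):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;	/* stand-in: 64-bit with ARM LPAE enabled */

int main(void)
{
	phys_addr_t phys = 0x1faab000;

	/* Wrong once phys_addr_t widens to 64 bits: %x consumes only 32,
	 * corrupting this argument and any varargs that follow it. */
	/* printf("fb: 0x%08x\n", phys); */

	/* Portable: cast to the widest type the format actually names. */
	printf("fb: 0x%08llx\n", (unsigned long long)phys);
	return 0;
}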
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 		obj->dev_addr = obj->linear->start;
 	}
 
-	DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
-		obj, obj->phys_addr, obj->dev_addr);
+	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
+			 (unsigned long long)obj->phys_addr,
+			 (unsigned long long)obj->dev_addr);
 
 	return 0;
 }
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
 			 * refcount on the gem object itself.
 			 */
			drm_gem_object_reference(obj);
-			dma_buf_put(buf);
 			return obj;
 		}
 	}
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
 	}
 
 	dobj->obj.import_attach = attach;
+	get_dma_buf(buf);
 
 	/*
 	 * Don't call dma_buf_map_attachment() here - it maps the
...
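Both armada_gem.c hunks are the prime refcounting fix: the self-import fast path was dropping a dma_buf reference it never took, and the real import path kept the buffer attached without taking one of its own. The ownership rule, modelled as a runnable toy (get()/put() stand in for get_dma_buf()/dma_buf_put(); the struct is hypothetical):

#include <assert.h>
#include <stdio.h>

struct buf { int refs; };

static void get(struct buf *b) { b->refs++; }			/* get_dma_buf() */
static void put(struct buf *b) { assert(b->refs > 0); b->refs--; }	/* dma_buf_put() */

/* Importer: 'b' is a borrowed reference owned by the caller.  Keeping
 * the pointer past this call (the attachment) requires taking our own
 * reference; putting a reference we never took is the old bug. */
static void import(struct buf *b, struct buf **attachment)
{
	*attachment = b;
	get(b);		/* what the fix adds */
	/* put(b); */	/* what the fix removes */
}

int main(void)
{
	struct buf b = { .refs = 1 };	/* caller's reference */
	struct buf *att;

	import(&b, &att);
	assert(b.refs == 2);	/* caller's + attachment's */
	put(att);		/* detach drops the importer's reference */
	put(&b);		/* caller drops its own */
	printf("refs = %d\n", b.refs);	/* 0: balanced */
	return 0;
}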
@@ -68,6 +68,8 @@
 #define EDID_QUIRK_DETAILED_SYNC_PP		(1 << 6)
 /* Force reduced-blanking timings for detailed modes */
 #define EDID_QUIRK_FORCE_REDUCED_BLANKING	(1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC			(1 << 8)
 
 struct detailed_mode_closure {
 	struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
 
 	/* Medion MD 30217 PG */
 	{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+
+	/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+	{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 };
 
 /*
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 
 	drm_add_display_info(edid, &connector->display_info);
 
+	if (quirks & EDID_QUIRK_FORCE_8BPC)
+		connector->display_info.bpc = 8;
+
 	return num_modes;
 }
 EXPORT_SYMBOL(drm_add_edid_modes);
...
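For context, the quirk machinery this hooks into is a simple table match on the EDID's PNP vendor ID and product code; the quirk bits are OR-ed together and applied after mode parsing, overriding whatever bpc the panel (mis)reports. A userspace model of the lookup, reusing the constants from the hunk above:

#include <stdio.h>
#include <string.h>

#define EDID_QUIRK_FORCE_8BPC	(1 << 8)

static const struct edid_quirk {
	char vendor[4];
	int product_id;
	unsigned quirks;
} quirk_list[] = {
	/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
	{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
};

static unsigned edid_get_quirks(const char *vendor, int product_id)
{
	for (size_t i = 0; i < sizeof(quirk_list) / sizeof(quirk_list[0]); i++)
		if (!strcmp(quirk_list[i].vendor, vendor) &&
		    quirk_list[i].product_id == product_id)
			return quirk_list[i].quirks;
	return 0;
}

int main(void)
{
	int bpc = 6;	/* what the EDID claims */
	unsigned quirks = edid_get_quirks("SEC", 0xd033);

	if (quirks & EDID_QUIRK_FORCE_8BPC)
		bpc = 8;
	printf("bpc = %d\n", bpc);	/* 8 */
	return 0;
}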
@@ -566,11 +566,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 	if (dev->driver->unload)
 		dev->driver->unload(dev);
 err_primary_node:
-	drm_put_minor(dev->primary);
+	drm_unplug_minor(dev->primary);
 err_render_node:
-	drm_put_minor(dev->render);
+	drm_unplug_minor(dev->render);
 err_control_node:
-	drm_put_minor(dev->control);
+	drm_unplug_minor(dev->control);
 err_agp:
 	if (dev->driver->bus->agp_destroy)
 		dev->driver->bus->agp_destroy(dev);
...
@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 
+	/*
+	 * The dri breadcrumb update races against the drm master disappearing.
+	 * Instead of trying to fix this (this is by far not the only ums issue)
+	 * just don't do the update in kms mode.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	if (dev->primary->master) {
 		master_priv = dev->primary->master->driver_priv;
 		if (master_priv->sarea_priv)
@@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	mutex_init(&dev_priv->dpio_lock);
-	mutex_init(&dev_priv->rps.hw_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 
-	mutex_init(&dev_priv->pc8.lock);
-	dev_priv->pc8.requirements_met = false;
-	dev_priv->pc8.gpu_idle = false;
-	dev_priv->pc8.irqs_disabled = false;
-	dev_priv->pc8.enabled = false;
-	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
-	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+	intel_pm_setup(dev);
 
 	intel_display_crc_init(dev);
@@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	intel_irq_init(dev);
-	intel_pm_init(dev);
 	intel_uncore_sanitize(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev)
 
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
+	mutex_lock(&dev->struct_mutex);
 	i915_gem_context_close(dev, file_priv);
 	i915_gem_release(dev, file_priv);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
...
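The preclose hunk pairs with the i915_gem_context.c change further down: the struct_mutex acquisition is hoisted out of i915_gem_context_close() into the caller, so that i915_gem_release() (which also needs the lock) runs in the same critical section without deadlocking on a recursive acquire. The pattern, as a runnable pthread sketch (all names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int contexts, requests;

/* Callees no longer take the lock; they document that the caller must. */
static void context_close(void)	/* caller holds 'lock' */
{
	contexts = 0;
}

static void gem_release(void)	/* caller holds 'lock' */
{
	requests = 0;
}

/* One critical section covers both teardown steps, so no other thread
 * can observe the contexts gone while the requests still exist. */
static void driver_preclose(void)
{
	pthread_mutex_lock(&lock);
	context_close();
	gem_release();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	contexts = requests = 1;
	driver_preclose();
	printf("%d %d\n", contexts, requests);
	return 0;
}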
@@ -651,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		intel_modeset_init_hw(dev);
 
 		drm_modeset_lock_all(dev);
+		drm_mode_config_reset(dev);
 		intel_modeset_setup_hw_state(dev, true);
 		drm_modeset_unlock_all(dev);
...
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
-#define IS_ULT(dev)		(IS_HASWELL(dev) && \
+#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
+				 (((dev)->pdev->device & 0xf) == 0x2 || \
+				 ((dev)->pdev->device & 0xf) == 0x6 || \
+				 ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0x00F0) == 0x0020)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1901,9 +1906,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
...
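IS_BDW_ULT() classifies parts purely by the low nibble of the PCI device ID, where 0x2, 0x6 and 0xe denote the ULT variants. A self-contained check (the sample IDs below are assumptions for illustration, not an authoritative list):

#include <stdio.h>
#include <stdint.h>

/* Broadwell ULT parts are distinguished by the PCI device ID's low
 * nibble: 0x2, 0x6 or 0xe. */
static int is_bdw_ult(uint16_t device)
{
	uint8_t n = device & 0xf;
	return n == 0x2 || n == 0x6 || n == 0xe;
}

int main(void)
{
	/* 0x1616 assumed as a mobile Broadwell example (low nibble 0x6),
	 * 0x160a as a non-ULT counterexample (low nibble 0xa). */
	printf("%d %d\n", is_bdw_ult(0x1616), is_bdw_ult(0x160a));	/* 1 0 */
	return 0;
}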
@@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	mutex_lock(&dev->struct_mutex);
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static struct i915_hw_context *
@@ -423,11 +421,21 @@ static int do_switch(struct i915_hw_context *to)
 	if (ret)
 		return ret;
 
-	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
+	/*
+	 * Pin can switch back to the default context if we end up calling into
+	 * evict_everything - as a last ditch gtt defrag effort that also
+	 * switches to the default context. Hence we need to reload from here.
+	 */
+	from = ring->last_context;
+
+	/*
+	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
 	 * that thanks to write = false in this call and us not setting any gpu
 	 * write domains when putting a context object onto the active list
 	 * (when switching away from it), this won't block.
-	 * XXX: We need a real interface to do this instead of trickery. */
+	 *
+	 * XXX: We need a real interface to do this instead of trickery.
+	 */
 	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
 	if (ret) {
 		i915_gem_object_unpin(to->obj);
...
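The do_switch() hunk is the use-after-free fix from the shortlog: `from` was sampled before pinning, but pinning can recurse into eviction, which itself switches to the default context and can free the object `from` pointed at. Re-reading ring->last_context after the re-entrant call is the whole fix. A toy reproduction of the hazard (stand-in names):

#include <stdio.h>
#include <stdlib.h>

struct ctx { int id; };
static struct ctx *last_context;

/* Stands in for i915_gem_obj_ggtt_pin(): under memory pressure it can
 * re-enter the context-switch path and replace last_context. */
static void pin(void)
{
	free(last_context);
	last_context = calloc(1, sizeof(*last_context));
	last_context->id = 0;	/* default context */
}

int main(void)
{
	last_context = calloc(1, sizeof(*last_context));
	last_context->id = 42;

	struct ctx *from = last_context;	/* sampled too early... */
	pin();					/* ...may free *from... */
	from = last_context;			/* ...so reload afterwards */
	printf("from %d\n", from->id);
	free(last_context);
	return 0;
}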
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
+search_again:
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
 		if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 		list_del_init(&vma->exec_list);
 	}
 
-	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_vm() is unnecessary.
+	/* Can we unpin some objects such as idle hw contents,
+	 * or pending flips?
 	 */
-	return -ENOSPC;
+	ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	/* Only idle the GPU and repeat the search once */
+	i915_gem_retire_requests(dev);
+	nonblocking = true;
+	goto search_again;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
...
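The eviction hunk replaces a hard -ENOSPC with one bounded retry: idle the GPU, retire requests so active objects migrate to the inactive list, rescan, and only then fail; setting nonblocking on the way back guarantees at most two passes. A skeleton of the control flow (toy bookkeeping in place of the real eviction lists):

#include <stdio.h>
#include <stdbool.h>

static int active = 3, inactive = 0;

static void gpu_idle(void)	/* i915_gpu_idle() + retire_requests() */
{
	inactive += active;
	active = 0;
}

static int evict_something(bool nonblocking)
{
	int pass = 0;

search_again:
	pass++;
	if (inactive > 0)
		return pass;		/* found something to evict */
	if (nonblocking)
		return -1;		/* -ENOSPC: already retried */
	gpu_idle();			/* only idle the GPU and repeat once */
	nonblocking = true;
	goto search_again;
}

int main(void)
{
	printf("succeeded on pass %d\n", evict_something(false));	/* 2 */
	return 0;
}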
@@ -337,8 +337,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 		kfree(ppgtt->gen8_pt_dma_addr[i]);
 	}
 
-	__free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
-	__free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+	__free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
 }
 
 /**
@@ -1241,6 +1241,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
 	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
 	if (bdw_gmch_ctl)
 		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+	if (bdw_gmch_ctl > 4) {
+		WARN_ON(!i915_preliminary_hw_support);
+		return 4<<20;
+	}
+
 	return bdw_gmch_ctl << 20;
 }
...
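The gen8_ppgtt_cleanup() fix hinges on __free_pages() taking an allocation *order* (log2 of the page count), not a byte count, so passing num_pt_pages << PAGE_SHIFT handed it a nonsense order. get_order() converts a size in bytes to the smallest sufficient order. A userspace re-derivation showing the difference:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

/* Smallest order such that (PAGE_SIZE << order) >= size,
 * mirroring the kernel's get_order(). */
static unsigned get_order(unsigned long size)
{
	unsigned order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long num_pt_pages = 4;

	/* Correct second argument for __free_pages(): order 2 (4 pages). */
	printf("order = %u\n", get_order(num_pt_pages << PAGE_SHIFT));
	/* The old code passed the byte count itself as the "order": */
	printf("bogus order = %lu\n", num_pt_pages << PAGE_SHIFT);	/* 16384 */
	return 0;
}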
@@ -9135,7 +9135,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);
 
-	if (!IS_HASWELL(dev)) {
+	if (!HAS_DDI(dev)) {
 		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
 	}
@@ -11036,8 +11036,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 	}
 
 	intel_modeset_check_state(dev);
-
-	drm_mode_config_reset(dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -11046,7 +11044,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
 	intel_setup_overlay(dev);
 
+	drm_modeset_lock_all(dev);
+	drm_mode_config_reset(dev);
 	intel_modeset_setup_hw_state(dev, false);
+	drm_modeset_unlock_all(dev);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
...
@@ -821,6 +821,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
 				    uint32_t sprite_width, int pixel_size,
 				    bool enabled, bool scaled);
 void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
 bool intel_fbc_enabled(struct drm_device *dev);
 void intel_update_fbc(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
...
@@ -451,7 +451,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
 
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_BROADWELL(dev)) {
+		val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+	} else if (HAS_PCH_SPLIT(dev)) {
 		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 	} else {
 		if (IS_VALLEYVIEW(dev))
@@ -479,6 +481,13 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
 	return val;
 }
 
+static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+}
+
 static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,7 +505,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev,
 	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
 	level = intel_panel_compute_brightness(dev, pipe, level);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (IS_BROADWELL(dev))
+		return intel_bdw_panel_set_backlight(dev, level);
+	else if (HAS_PCH_SPLIT(dev))
 		return intel_pch_panel_set_backlight(dev, level);
 
 	if (is_backlight_combination_mode(dev)) {
@@ -666,7 +677,16 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
 		POSTING_READ(reg);
 		I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
 
-		if (HAS_PCH_SPLIT(dev) &&
+		if (IS_BROADWELL(dev)) {
+			/*
+			 * Broadwell requires PCH override to drive the PCH
+			 * backlight pin. The above will configure the CPU
+			 * backlight pin, which we don't plan to use.
+			 */
+			tmp = I915_READ(BLC_PWM_PCH_CTL1);
+			tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
+			I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+		} else if (HAS_PCH_SPLIT(dev) &&
 		    !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
 			tmp = I915_READ(BLC_PWM_PCH_CTL1);
 			tmp |= BLM_PCH_PWM_ENABLE;
...
@@ -5685,6 +5685,7 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool is_enabled, enable_requested;
+	unsigned long irqflags;
 	uint32_t tmp;
 
 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
@@ -5702,9 +5703,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 				      HSW_PWR_WELL_STATE_ENABLED), 20))
 				DRM_ERROR("Timeout enabling power well\n");
 		}
+
+		if (IS_BROADWELL(dev)) {
+			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+			I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+				   dev_priv->de_irq_mask[PIPE_B]);
+			I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+				   ~dev_priv->de_irq_mask[PIPE_B] |
+				   GEN8_PIPE_VBLANK);
+			I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+				   dev_priv->de_irq_mask[PIPE_C]);
+			I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+				   ~dev_priv->de_irq_mask[PIPE_C] |
+				   GEN8_PIPE_VBLANK);
+			POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+		}
 	} else {
 		if (enable_requested) {
-			unsigned long irqflags;
 			enum pipe p;
 
 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
@@ -6130,10 +6146,19 @@ int vlv_freq_opcode(int ddr_freq, int val)
 	return val;
 }
 
-void intel_pm_init(struct drm_device *dev)
+void intel_pm_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	mutex_init(&dev_priv->rps.hw_lock);
+
+	mutex_init(&dev_priv->pc8.lock);
+	dev_priv->pc8.requirements_met = false;
+	dev_priv->pc8.gpu_idle = false;
+	dev_priv->pc8.irqs_disabled = false;
+	dev_priv->pc8.enabled = false;
+	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
 	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
 			  intel_gen6_powersave_work);
 }
@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 		} else if (IS_GEN6(ring->dev)) {
 			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
 		} else {
+			/* XXX: gen8 returns to sanity */
 			mmio = RING_HWS_PGA(ring->mmio_base);
 		}
...
@@ -784,6 +784,7 @@ static int gen6_do_reset(struct drm_device *dev)
 int intel_gpu_reset(struct drm_device *dev)
 {
 	switch (INTEL_INFO(dev)->gen) {
+	case 8:
 	case 7:
 	case 6: return gen6_do_reset(dev);
 	case 5: return ironlake_do_reset(dev);
...
@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
 	if (nouveau_runtime_pm == 0)
 		return -EINVAL;
 
+	/* are we optimus enabled? */
+	if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+		DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+		return -EINVAL;
+	}
+
 	nv_debug_level(SILENT);
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
...
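In the runtime-PM framework, a ->runtime_suspend() callback that returns an error leaves the device in the active state, so returning -EINVAL here is how nouveau opts out of auto-powerdown on machines without Optimus or the v1 _DSM, where nothing can actually cut power to the card or wake it again. A compact model of the veto logic (the ACPI probes are stubbed; names mirror the hunk above):

#include <stdio.h>
#include <errno.h>

static int runtime_pm = -1;	/* module option: -1 = auto-detect */

/* Stubs for nouveau_is_optimus()/nouveau_is_v1_dsm(); on real hardware
 * these probe ACPI for a supported power-switching method. */
static int is_optimus(void) { return 0; }
static int is_v1_dsm(void)  { return 0; }

static int runtime_suspend(void)
{
	if (runtime_pm == 0)
		return -EINVAL;
	/* are we optimus enabled? */
	if (runtime_pm == -1 && !is_optimus() && !is_v1_dsm()) {
		fprintf(stderr, "failing to power off - not optimus\n");
		return -EINVAL;	/* PM core keeps the device active */
	}
	/* ...actual powerdown sequence would run here... */
	return 0;
}

int main(void)
{
	printf("suspend -> %d\n", runtime_suspend());	/* -22 (-EINVAL) */
	return 0;
}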
@@ -1196,7 +1196,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 	} else if ((rdev->family == CHIP_TAHITI) ||
 		   (rdev->family == CHIP_PITCAIRN))
 		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
-	else if (rdev->family == CHIP_VERDE)
+	else if ((rdev->family == CHIP_VERDE) ||
+		 (rdev->family == CHIP_OLAND) ||
+		 (rdev->family == CHIP_HAINAN)) /* for completeness.  HAINAN has no display hw */
 		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
 
 	switch (radeon_crtc->crtc_id) {
...
@@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev,
 		radeon_ring_write(ring, 0); /* src/dst endian swap */
 		radeon_ring_write(ring, src_offset & 0xffffffff);
 		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
-		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, dst_offset & 0xffffffff);
 		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
 		src_offset += cur_size_in_bytes;
 		dst_offset += cur_size_in_bytes;
...
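The cik_copy_dma "typo" is subtler than it looks: the DMA packet carries the destination as lo/hi 32-bit halves, and the stray 0xfffffffc mask silently cleared bits 1:0 of the low half, so copies to destinations that were not 4-byte aligned landed short. A worked example:

#include <stdio.h>
#include <stdint.h>

static uint32_t lower_32(uint64_t v, uint32_t mask) { return (uint32_t)v & mask; }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t dst = 0x100002003ULL;	/* GPU address, not 4-byte aligned */

	/* Buggy mask drops the low two bits: 0x1_00002000. */
	printf("buggy: %#x_%08x\n", upper_32(dst), lower_32(dst, 0xfffffffc));
	/* Fixed mask preserves the full address: 0x1_00002003. */
	printf("fixed: %#x_%08x\n", upper_32(dst), lower_32(dst, 0xffffffff));
	return 0;
}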
@@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = NULL,
+		.blit = &cik_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &cik_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = NULL,
+		.blit = &cik_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &cik_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
...
@@ -508,15 +508,6 @@ static const struct file_operations radeon_driver_kms_fops = {
 #endif
 };
 
-static void
-radeon_pci_shutdown(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
-
-	radeon_driver_unload_kms(dev);
-}
-
 static struct drm_driver kms_driver = {
 	.driver_features =
 	    DRIVER_USE_AGP |
@@ -586,7 +577,6 @@ static struct pci_driver radeon_kms_pci_driver = {
 	.probe = radeon_pci_probe,
 	.remove = radeon_pci_remove,
 	.driver.pm = &radeon_pm_ops,
-	.shutdown = radeon_pci_shutdown,
 };
 
 static int __init radeon_init(void)
...
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
 	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
 	base = G_000100_MC_FB_START(base) << 16;
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	/* Some boards seem to be configured for 128MB of sideport memory,
+	 * but really only have 64MB.  Just skip the sideport and use
+	 * UMA memory.
+	 */
+	if (rdev->mc.igp_sideport_enabled &&
+	    (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
+		base += 128 * 1024 * 1024;
+		rdev->mc.real_vram_size -= 128 * 1024 * 1024;
+		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	}
 
 	/* Use K8 direct mapping for fast fb access. */
 	rdev->fastfb_working = false;
...
@@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-	    drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
-	page_last = vma_pages(vma) +
-	    drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+	    vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+	page_last = vma_pages(vma) + vma->vm_pgoff -
+	    drm_vma_node_start(&bo->vma_node);
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
...
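The TTM arithmetic, with concrete numbers: the offset into the buffer must be computed as vm_pgoff - node_start, and the two expressions only coincide when the vma maps the whole object (vm_pgoff == node_start), which is why only vmas with partial coverage broke. A worked example (values chosen purely for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* BO placed at page 0x100 of the mmap space; userspace maps only
	 * its second page and faults on the vma's first page. */
	uint64_t node_start = 0x100;	/* drm_vma_node_start(&bo->vma_node) */
	uint64_t vm_pgoff   = 0x101;	/* vma->vm_pgoff */
	uint64_t fault_page = 0;	/* (address - vm_start) >> PAGE_SHIFT */

	uint64_t old_off = fault_page + node_start - vm_pgoff;	/* wraps */
	uint64_t new_off = fault_page + vm_pgoff - node_start;	/* page 1 */

	printf("old page_offset = %#llx\n", (unsigned long long)old_off);
	printf("new page_offset = %#llx\n", (unsigned long long)new_off);
	return 0;
}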
@@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 				  SVGA_FIFO_3D_HWVERSION));
 		break;
 	}
+	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
+		param->value = dev_priv->memory_size;
+		break;
 	default:
 		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
 			  param->param);
...
@@ -75,6 +75,7 @@
 #define DRM_VMW_PARAM_FIFO_CAPS        4
 #define DRM_VMW_PARAM_MAX_FB_SIZE      5
 #define DRM_VMW_PARAM_FIFO_HW_VERSION  6
+#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
 
 /**
  * struct drm_vmw_getparam_arg
...
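Userspace reaches the new parameter through the driver-private getparam ioctl. A hedged sketch using libdrm's drmCommandWriteRead(); the struct layout mirrors vmwgfx_drm.h, and DRM_VMW_GET_PARAM being ioctl index 0 is an assumption based on that header:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <xf86drm.h>	/* libdrm */

#define DRM_VMW_GET_PARAM		0	/* per vmwgfx_drm.h */
#define DRM_VMW_PARAM_MAX_SURF_MEMORY	7

struct drm_vmw_getparam_arg {
	uint64_t value;
	uint32_t param;
	uint32_t pad64;
};

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumes card0 is vmwgfx */
	struct drm_vmw_getparam_arg arg = {
		.param = DRM_VMW_PARAM_MAX_SURF_MEMORY,
	};

	if (fd < 0 || drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
					  &arg, sizeof(arg)))
		return 1;	/* pre-fix kernels reject param 7 with EINVAL */
	printf("max surface memory: %llu bytes\n",
	       (unsigned long long)arg.value);
	return 0;
}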