Commit 5b2eef96 authored by Linus Torvalds

Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (390 commits)
  drm/radeon/kms: disable underscan by default
  drm/radeon/kms: only enable hdmi features if the monitor supports audio
  drm: Restore the old_fb upon modeset failure
  drm/nouveau: fix hwmon device binding
  radeon: consolidate asic-specific function decls for pre-r600
  vga_switcheroo: comparing too few characters in strncmp()
  drm/radeon/kms: add NI pci ids
  drm/radeon/kms: don't enable pcie gen2 on NI yet
  drm/radeon/kms: add radeon_asic struct for NI asics
  drm/radeon/kms/ni: load default sclk/mclk/vddc at pm init
  drm/radeon/kms: add ucode loader for NI
  drm/radeon/kms: add support for DCE5 display LUTs
  drm/radeon/kms: add ni_reg.h
  drm/radeon/kms: add bo blit support for NI
  drm/radeon/kms: always use writeback/events for fences on NI
  drm/radeon/kms: adjust default clock/vddc tracking for pm on DCE5
  drm/radeon/kms: add backend map workaround for barts
  drm/radeon/kms: fill gpu init for NI asics
  drm/radeon/kms: add disabled vbios accessor for NI asics
  drm/radeon/kms: handle NI thermal controller
  ...
parents 8adbf8d4 56bec7c0
......@@ -120,7 +120,6 @@ struct agp_bridge_driver {
void (*agp_destroy_page)(struct page *, int flags);
void (*agp_destroy_pages)(struct agp_memory *);
int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
void (*chipset_flush)(struct agp_bridge_data *);
};
struct agp_bridge_data {
......
......@@ -276,7 +276,6 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case AGPIOC_CHIPSET_FLUSH32:
ret_val = agpioc_chipset_flush_wrap(curr_priv);
break;
}
......
......@@ -102,6 +102,5 @@ void agp_free_memory_wrap(struct agp_memory *memory);
struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
struct agp_memory *agp_find_mem_by_key(int key);
struct agp_client *agp_find_client_by_pid(pid_t id);
int agpioc_chipset_flush_wrap(struct agp_file_private *priv);
#endif /* _AGP_COMPAT_H */
......@@ -957,13 +957,6 @@ static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
return agp_unbind_memory(memory);
}
int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
{
DBG("");
agp_flush_chipset(agp_bridge);
return 0;
}
static long agp_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
......@@ -1039,7 +1032,6 @@ static long agp_ioctl(struct file *file,
break;
case AGPIOC_CHIPSET_FLUSH:
ret_val = agpioc_chipset_flush_wrap(curr_priv);
break;
}
......
......@@ -81,13 +81,6 @@ static int agp_get_key(void)
return -1;
}
void agp_flush_chipset(struct agp_bridge_data *bridge)
{
if (bridge->driver->chipset_flush)
bridge->driver->chipset_flush(bridge);
}
EXPORT_SYMBOL(agp_flush_chipset);
/*
* Use kmalloc if possible for the page list. Otherwise fall back to
* vmalloc. This speeds things up and also saves memory for small AGP
......@@ -487,26 +480,6 @@ int agp_unbind_memory(struct agp_memory *curr)
}
EXPORT_SYMBOL(agp_unbind_memory);
/**
 * agp_rebind_memory - Rewrite the entire GATT, useful on resume
*/
int agp_rebind_memory(void)
{
struct agp_memory *curr;
int ret_val = 0;
spin_lock(&agp_bridge->mapped_lock);
list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
ret_val = curr->bridge->driver->insert_memory(curr,
curr->pg_start,
curr->type);
if (ret_val != 0)
break;
}
spin_unlock(&agp_bridge->mapped_lock);
return ret_val;
}
EXPORT_SYMBOL(agp_rebind_memory);
/* End - Routines for handling swapping of agp_memory into the GATT */
......
......@@ -828,14 +828,9 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
static int agp_intel_resume(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
int ret_val;
bridge->driver->configure();
ret_val = agp_rebind_memory();
if (ret_val != 0)
return ret_val;
return 0;
}
#endif
......
......@@ -75,6 +75,8 @@
#define I810_GMS_DISABLE 0x00000000
#define I810_PGETBL_CTL 0x2020
#define I810_PGETBL_ENABLED 0x00000001
/* Note: PGETBL_CTL2 has a different offset on G33. */
#define I965_PGETBL_CTL2 0x20c4
#define I965_PGETBL_SIZE_MASK 0x0000000e
#define I965_PGETBL_SIZE_512KB (0 << 1)
#define I965_PGETBL_SIZE_256KB (1 << 1)
......@@ -82,9 +84,15 @@
#define I965_PGETBL_SIZE_1MB (3 << 1)
#define I965_PGETBL_SIZE_2MB (4 << 1)
#define I965_PGETBL_SIZE_1_5MB (5 << 1)
#define G33_PGETBL_SIZE_MASK (3 << 8)
#define G33_PGETBL_SIZE_1M (1 << 8)
#define G33_PGETBL_SIZE_2M (2 << 8)
#define G33_GMCH_SIZE_MASK (3 << 8)
#define G33_GMCH_SIZE_1M (1 << 8)
#define G33_GMCH_SIZE_2M (2 << 8)
#define G4x_GMCH_SIZE_MASK (0xf << 8)
#define G4x_GMCH_SIZE_1M (0x1 << 8)
#define G4x_GMCH_SIZE_2M (0x3 << 8)
#define G4x_GMCH_SIZE_VT_1M (0x9 << 8)
#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
......
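The new values encode the GTT size in the GMCH control word: a 2-bit field at bit 8 on G33, widened to 4 bits on G4x, where the VT_* encodings appear on the VT-d capable variants. An illustrative decoder for the G4x field (the helper name and the kilobyte convention are assumptions, not part of this patch):

static unsigned int g4x_gtt_size_kb(u32 gmch_ctrl)
{
	switch (gmch_ctrl & G4x_GMCH_SIZE_MASK) {
	case G4x_GMCH_SIZE_1M:
	case G4x_GMCH_SIZE_VT_1M:
		return 1024;
	case G4x_GMCH_SIZE_VT_1_5M:
		return 1536;
	case G4x_GMCH_SIZE_2M:
	case G4x_GMCH_SIZE_VT_2M:
		return 2048;
	default:
		return 0;	/* reserved/unknown encoding */
	}
}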
......@@ -466,10 +466,4 @@ drm_agp_bind_pages(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_agp_bind_pages);
void drm_agp_chipset_flush(struct drm_device *dev)
{
agp_flush_chipset(dev->agp->bridge);
}
EXPORT_SYMBOL(drm_agp_chipset_flush);
#endif /* __OS_HAS_AGP */
......@@ -336,7 +336,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *adjusted_mode, saved_mode;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
......@@ -350,6 +350,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!crtc->enabled)
return true;
saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
saved_x = crtc->x;
saved_y = crtc->y;
......@@ -427,11 +428,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
/* Store real post-adjustment hardware mode. */
crtc->hwmode = *adjusted_mode;
/* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
*/
drm_calc_timestamping_constants(crtc);
/* XXX free adjustedmode */
drm_mode_destroy(dev, adjusted_mode);
/* FIXME: add subpixel order */
done:
if (!ret) {
crtc->hwmode = saved_hwmode;
crtc->mode = saved_mode;
crtc->x = saved_x;
crtc->y = saved_y;
......@@ -650,6 +661,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
old_fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
set->crtc->fb = old_fb;
ret = -EINVAL;
goto fail;
}
......@@ -664,8 +676,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
set->crtc->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
set->x, set->y, old_fb);
if (ret != 0)
if (ret != 0) {
set->crtc->fb = old_fb;
goto fail;
}
}
DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
for (i = 0; i < set->num_connectors; i++) {
......
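Both failure paths in drm_crtc_helper_set_config() now restore crtc->fb, preserving the invariant that a CRTC never keeps pointing at a framebuffer the caller may be about to tear down. Condensed, the idiom the patch applies (names follow the surrounding code; error handling is abbreviated):

struct drm_framebuffer *old_fb = set->crtc->fb;

set->crtc->fb = set->fb;		/* try the new framebuffer */
if (crtc_funcs->mode_set_base(set->crtc, set->x, set->y, old_fb)) {
	set->crtc->fb = old_fb;		/* failed: undo before unwinding */
	goto fail;
}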
......@@ -607,6 +607,25 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_fini);
void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
FB_VISUAL_TRUECOLOR;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
info->fix.accel = FB_ACCEL_NONE;
info->fix.type_aux = 0;
info->fix.line_length = fb->pitch;
return;
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
{
......@@ -816,6 +835,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
drm_fb_helper_fill_fix(info, fb_helper->fb);
}
mutex_unlock(&dev->mode_config.mutex);
......@@ -953,6 +973,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (new_fb) {
info->var.pixclock = 0;
drm_fb_helper_fill_fix(info, fb_helper->fb);
if (register_framebuffer(info) < 0) {
return -EINVAL;
}
......@@ -979,24 +1000,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
FB_VISUAL_TRUECOLOR;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
info->fix.accel = FB_ACCEL_NONE;
info->fix.type_aux = 0;
info->fix.line_length = pitch;
return;
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
......@@ -1005,6 +1008,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
......@@ -1530,3 +1534,24 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED)
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable console.
*/
#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED)
static int __init drm_fb_helper_modinit(void)
{
const char *name = "fbcon";
struct module *fbcon;
mutex_lock(&module_mutex);
fbcon = find_module(name);
mutex_unlock(&module_mutex);
if (!fbcon)
request_module_nowait(name);
return 0;
}
module_init(drm_fb_helper_modinit);
#endif
......@@ -236,6 +236,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return -EINVAL;
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
......
......@@ -392,9 +392,35 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_check_range = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
* Initialize LRU scanning.
*
* This simply sets up the scanning routines with the parameters for the desired
* hole. This version is for range-restricted scans.
*
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end)
{
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
/**
* Add a node to the scan list that might be freed to make space for the desired
* hole.
......@@ -406,6 +432,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct list_head *prev_free, *next_free;
struct drm_mm_node *prev_node, *next_node;
unsigned long adj_start;
unsigned long adj_end;
mm->scanned_blocks++;
......@@ -452,7 +480,17 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->free_stack.prev = prev_free;
node->free_stack.next = next_free;
if (check_free_hole(node->start, node->start + node->size,
if (mm->scan_check_range) {
adj_start = node->start < mm->scan_start ?
mm->scan_start : node->start;
adj_end = node->start + node->size > mm->scan_end ?
mm->scan_end : node->start + node->size;
} else {
adj_start = node->start;
adj_end = node->start + node->size;
}
if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = node->start;
mm->scan_hit_size = node->size;
......
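The range-restricted scan keeps the two-phase contract of drm_mm_init_scan(): feed candidate blocks to drm_mm_scan_add_block() in eviction order until it reports a hit, then hand every fed block back through drm_mm_scan_remove_block() in reverse order; a nonzero return there flags a block that lies inside the found hole. A condensed sketch of that contract, modelled on the i915_gem_evict.c changes later in this merge (refcounting and error handling are dropped for brevity; the LRU list and object fields follow the i915 code):

struct drm_i915_gem_object *obj, *next;
LIST_HEAD(unwind);
LIST_HEAD(eviction_list);

drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
			    alignment, 0, dev_priv->mm.gtt_mappable_end);

/* Phase 1: add blocks in LRU order; stop at the first hit. The LIFO
 * unwind list lets us remove them again in reverse order. */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
	list_add(&obj->exec_list, &unwind);
	if (drm_mm_scan_add_block(obj->gtt_space))
		break;
}

/* Phase 2: every block fed to the scanner must be handed back before
 * the drm_mm is touched in any other way (see the warning above).
 * drm_mm_scan_remove_block() returns nonzero for blocks inside the
 * hole, i.e. the ones whose eviction actually opens the space. */
while (!list_empty(&unwind)) {
	obj = list_first_entry(&unwind,
			       struct drm_i915_gem_object, exec_list);
	if (drm_mm_scan_remove_block(obj->gtt_space)) {
		list_move(&obj->exec_list, &eviction_list);
		continue;
	}
	list_del_init(&obj->exec_list);
}

/* Only now, with the scan list empty, is it legal to unbind. */
list_for_each_entry_safe(obj, next, &eviction_list, exec_list) {
	list_del_init(&obj->exec_list);
	i915_gem_object_unbind(obj);
}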
......@@ -40,12 +40,22 @@
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
struct idr drm_minors_idr;
......
......@@ -9,6 +9,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
i915_gem_tiling.o \
i915_trace_points.o \
intel_display.o \
......
......@@ -111,7 +111,7 @@ static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965gm_info = {
.gen = 4, .is_crestline = 1,
.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
.has_overlay = 1,
.supports_tv = 1,
};
......@@ -130,7 +130,7 @@ static const struct intel_device_info intel_g45_info = {
static const struct intel_device_info intel_gm45_info = {
.gen = 4, .is_g4x = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
.has_bsd_ring = 1,
......@@ -150,7 +150,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1,
.need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 0, /* disabled due to buggy hardware */
.has_bsd_ring = 1,
};
......@@ -165,6 +165,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
static const struct intel_device_info intel_sandybridge_m_info = {
.gen = 6, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
};
......@@ -244,10 +245,34 @@ void intel_detect_pch (struct drm_device *dev)
}
}
void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
udelay(10);
I915_WRITE_NOTRACE(FORCEWAKE, 1);
POSTING_READ(FORCEWAKE);
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
udelay(10);
}
void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_kms_helper_poll_disable(dev);
pci_save_state(dev->pdev);
/* If KMS is active, we do the leavevt stuff here */
......@@ -284,7 +309,9 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
if (state.event == PM_EVENT_PRETHAW)
return 0;
drm_kms_helper_poll_disable(dev);
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
error = i915_drm_freeze(dev);
if (error)
......@@ -304,6 +331,12 @@ static int i915_drm_thaw(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
i915_restore_state(dev);
intel_opregion_setup(dev);
......@@ -332,6 +365,9 @@ int i915_resume(struct drm_device *dev)
{
int ret;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
if (pci_enable_device(dev->pdev))
return -EIO;
......@@ -405,6 +441,14 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
static int gen6_do_reset(struct drm_device *dev, u8 flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
}
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
......@@ -431,7 +475,8 @@ int i915_reset(struct drm_device *dev, u8 flags)
bool need_display = true;
int ret;
mutex_lock(&dev->struct_mutex);
if (!mutex_trylock(&dev->struct_mutex))
return -EBUSY;
i915_gem_reset(dev);
......@@ -439,6 +484,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (get_seconds() - dev_priv->last_gpu_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
} else switch (INTEL_INFO(dev)->gen) {
case 6:
ret = gen6_do_reset(dev, flags);
break;
case 5:
ret = ironlake_do_reset(dev, flags);
break;
......@@ -472,9 +520,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
struct intel_ring_buffer *ring = &dev_priv->render_ring;
dev_priv->mm.suspended = 0;
ring->init(dev, ring);
dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
if (HAS_BSD(dev))
dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
if (HAS_BLT(dev))
dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_irq_install(dev);
......@@ -523,6 +576,9 @@ static int i915_pm_suspend(struct device *dev)
return -ENODEV;
}
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
error = i915_drm_freeze(drm_dev);
if (error)
return error;
......@@ -606,6 +662,8 @@ static struct drm_driver driver = {
.device_is_agp = i915_driver_device_is_agp,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
.get_vblank_timestamp = i915_get_vblank_timestamp,
.get_scanout_position = i915_get_crtc_scanoutpos,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
......@@ -661,8 +719,6 @@ static int __init i915_init(void)
driver.num_ioctls = i915_max_ioctl;
i915_gem_shrinker_init();
/*
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
* explicitly disabled with the module parameter.
......@@ -684,17 +740,11 @@ static int __init i915_init(void)
driver.driver_features &= ~DRIVER_MODESET;
#endif
if (!(driver.driver_features & DRIVER_MODESET)) {
driver.suspend = i915_suspend;
driver.resume = i915_resume;
}
return drm_init(&driver);
}
static void __exit i915_exit(void)
{
i915_gem_shrinker_exit();
drm_exit(&driver);
}
......
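__gen6_force_wake_get() above spin-waits (bounded to 50 iterations of 10 us) for any previous owner to release FORCEWAKE_ACK, asserts FORCEWAKE, then polls until the hardware acknowledges; the put side simply clears the bit. A hypothetical wrapper showing the intended bracketing of GT register access on Gen6 (illustrative only; the driver folds this into its register-read paths rather than exposing such a helper):

static u32 gen6_gt_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	/* Keep the GT well awake across the access: while in RC6 a
	 * render register can read back garbage without forcewake. */
	__gen6_force_wake_get(dev_priv);
	val = I915_READ(reg);
	__gen6_force_wake_put(dev_priv);

	return val;
}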
......@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
}
void
i915_gem_dump_object(struct drm_gem_object *obj, int len,
i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page;
DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;
......@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
i915_gem_dump_page(obj_priv->pages[page],
i915_gem_dump_page(obj->pages[page],
chunk, chunk + chunk_len,
obj_priv->gtt_offset +
obj->gtt_offset +
page * PAGE_SIZE,
mark);
}
......@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_device *dev = obj->base.dev;
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
__func__, obj, obj_priv->gtt_offset, handle,
__func__, obj, obj->gtt_offset, handle,
obj->size / 1024);
gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
obj->size);
gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
......@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
backing_map = kmap_atomic(obj->pages[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
......@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
(int)(obj_priv->gtt_offset +
(int)(obj->gtt_offset +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
......
......@@ -32,28 +32,36 @@
#include "i915_drm.h"
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
struct list_head *unwind)
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
list_add(&obj_priv->evict_list, unwind);
drm_gem_object_reference(&obj_priv->base);
return drm_mm_scan_add_block(obj_priv->gtt_space);
list_add(&obj->exec_list, unwind);
drm_gem_object_reference(&obj->base);
return drm_mm_scan_add_block(obj->gtt_space);
}
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
struct drm_i915_gem_object *obj_priv;
struct drm_i915_gem_object *obj;
int ret = 0;
i915_gem_retire_requests(dev);
/* Re-check for free space after retiring requests */
if (drm_mm_search_free(&dev_priv->mm.gtt_space,
min_size, alignment, 0))
return 0;
if (mappable) {
if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
min_size, alignment, 0,
dev_priv->mm.gtt_mappable_end,
0))
return 0;
} else {
if (drm_mm_search_free(&dev_priv->mm.gtt_space,
min_size, alignment, 0))
return 0;
}
/*
* The goal is to evict objects and amalgamate space in LRU order.
......@@ -79,45 +87,50 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
*/
INIT_LIST_HEAD(&unwind_list);
drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
alignment, 0,
dev_priv->mm.gtt_mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
if (mark_free(obj_priv, &unwind_list))
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
if (mark_free(obj, &unwind_list))
goto found;
}
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
if (obj_priv->base.write_domain || obj_priv->pin_count)
if (obj->base.write_domain || obj->pin_count)
continue;
if (mark_free(obj_priv, &unwind_list))
if (mark_free(obj, &unwind_list))
goto found;
}
/* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
if (obj_priv->pin_count)
list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (obj->pin_count)
continue;
if (mark_free(obj_priv, &unwind_list))
if (mark_free(obj, &unwind_list))
goto found;
}
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
if (! obj_priv->base.write_domain || obj_priv->pin_count)
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (! obj->base.write_domain || obj->pin_count)
continue;
if (mark_free(obj_priv, &unwind_list))
if (mark_free(obj, &unwind_list))
goto found;
}
/* Nothing found, clean up and bail out! */
list_for_each_entry(obj_priv, &unwind_list, evict_list) {
ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
list_for_each_entry(obj, &unwind_list, exec_list) {
ret = drm_mm_scan_remove_block(obj->gtt_space);
BUG_ON(ret);
drm_gem_object_unreference(&obj_priv->base);
drm_gem_object_unreference(&obj->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
......@@ -131,33 +144,33 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
obj_priv = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
evict_list);
if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
list_move(&obj_priv->evict_list, &eviction_list);
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
if (drm_mm_scan_remove_block(obj->gtt_space)) {
list_move(&obj->exec_list, &eviction_list);
continue;
}
list_del(&obj_priv->evict_list);
drm_gem_object_unreference(&obj_priv->base);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
obj_priv = list_first_entry(&eviction_list,
struct drm_i915_gem_object,
evict_list);
obj = list_first_entry(&eviction_list,
struct drm_i915_gem_object,
exec_list);
if (ret == 0)
ret = i915_gem_object_unbind(&obj_priv->base);
list_del(&obj_priv->evict_list);
drm_gem_object_unreference(&obj_priv->base);
ret = i915_gem_object_unbind(obj);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
return ret;
}
int
i915_gem_evict_everything(struct drm_device *dev)
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
......@@ -176,36 +189,22 @@ i915_gem_evict_everything(struct drm_device *dev)
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
ret = i915_gem_evict_inactive(dev);
if (ret)
return ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
BUG_ON(!lists_empty);
return 0;
return i915_gem_evict_inactive(dev, purgeable_only);
}
/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev)
i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
while (!list_empty(&dev_priv->mm.inactive_list)) {
struct drm_gem_object *obj;
int ret;
obj = &list_first_entry(&dev_priv->mm.inactive_list,
struct drm_i915_gem_object,
mm_list)->base;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
DRM_ERROR("Error unbinding object: %d\n", ret);
return ret;
struct drm_i915_gem_object *obj, *next;
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) {
if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
int ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
}
}
......
/*
* Copyright © 2010 Daniel Vetter
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
i915_gem_clflush_object(obj);
if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
intel_gtt_insert_sg_entries(obj->sg_list,
obj->num_sg,
obj->gtt_space->start
>> PAGE_SHIFT,
obj->agp_type);
} else
intel_gtt_insert_pages(obj->gtt_space->start
>> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
obj->agp_type);
}
intel_gtt_chipset_flush();
}
int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (dev_priv->mm.gtt->needs_dmar) {
ret = intel_gtt_map_memory(obj->pages,
obj->base.size >> PAGE_SHIFT,
&obj->sg_list,
&obj->num_sg);
if (ret != 0)
return ret;
intel_gtt_insert_sg_entries(obj->sg_list,
obj->num_sg,
obj->gtt_space->start >> PAGE_SHIFT,
obj->agp_type);
} else
intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
obj->agp_type);
return 0;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->mm.gtt->needs_dmar) {
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
obj->num_sg = 0;
}
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
}
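The new file splits GTT insertion on needs_dmar: with DMA remapping active, the object's pages are first mapped to a scatterlist via intel_gtt_map_memory() and inserted with intel_gtt_insert_sg_entries(); otherwise the struct page pointers go in directly through intel_gtt_insert_pages(). A minimal caller sketch of the bind/unbind lifecycle (condensed; the real call sites live in i915_gem.c, and drm_mm_put_block() here stands in for the caller's actual cleanup):

/* bind: obj->gtt_space has just been allocated from the drm_mm */
ret = i915_gem_gtt_bind_object(obj);
if (ret) {
	/* only the DMAR mapping step can fail; give the range back */
	drm_mm_put_block(obj->gtt_space);
	obj->gtt_space = NULL;
	return ret;
}

/* ... object in use by the GPU ... */

/* unbind: drop the sg mapping (if any) and scrub the PTEs */
i915_gem_gtt_unbind_object(obj);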
......@@ -235,6 +235,7 @@ static void i915_restore_vga(struct drm_device *dev)
static void i915_save_modeset_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
......@@ -367,6 +368,28 @@ static void i915_save_modeset_reg(struct drm_device *dev)
}
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 6:
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
}
return;
}
......@@ -375,10 +398,33 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_a_reg, fpa0_reg, fpa1_reg;
int dpll_b_reg, fpb0_reg, fpb1_reg;
int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 6:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
break;
case 3:
case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
break;
}
if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
......@@ -771,8 +817,14 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
if (HAS_PCH_SPLIT(dev))
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
if (IS_GEN6(dev))
gen6_disable_rps(dev);
/* XXX disabling the clock gating breaks suspend on gm45
intel_disable_clock_gating(dev);
*/
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
......@@ -788,28 +840,6 @@ int i915_save_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 6:
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
}
return 0;
}
......@@ -823,27 +853,6 @@ int i915_restore_state(struct drm_device *dev)
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 6:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
break;
case 3:
case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
break;
}
i915_restore_display(dev);
/* Interrupt state */
......@@ -860,13 +869,16 @@ int i915_restore_state(struct drm_device *dev)
}
/* Clock gating state */
intel_init_clock_gating(dev);
intel_enable_clock_gating(dev);
if (HAS_PCH_SPLIT(dev)) {
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
}
if (IS_GEN6(dev))
gen6_enable_rps(dev_priv);
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
......
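Note the deliberate fall-through from case 3 into case 2 in the save path above: 945G/945GM/G33 parts carry eight extra fence registers at FENCE_REG_945_8 on top of the original eight at FENCE_REG_830_0, while Gen 4 and later expose sixteen at FENCE_REG_965_0 (FENCE_REG_SANDYBRIDGE_0 on Gen 6). A hypothetical helper making the counts explicit (not part of the patch):

static int i915_fence_reg_count(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 4)
		return 16;
	/* 945G/945GM/G33 add eight fences at FENCE_REG_945_8 */
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		return 16;
	return 8;
}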
......@@ -1442,8 +1442,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
/* Changes to enable or select take place the vblank
* after being written.
*/
intel_wait_for_vblank(intel_dp->base.base.dev,
intel_crtc->pipe);
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
......
......@@ -273,14 +273,8 @@ void intel_opregion_enable_asle(struct drm_device *dev)
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle) {
if (IS_MOBILE(dev)) {
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (IS_MOBILE(dev))
intel_enable_asle(dev);
spin_unlock_irqrestore(&dev_priv->user_irq_lock,
irqflags);
}
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN;
......
......@@ -1045,7 +1045,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* Set the SDVO control regs. */
if (INTEL_INFO(dev)->gen >= 4) {
sdvox = SDVO_BORDER_ENABLE;
sdvox = 0;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
......@@ -1075,7 +1077,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
......
......@@ -10,7 +10,7 @@ config DRM_NOUVEAU
select FB
select FRAMEBUFFER_CONSOLE if !EMBEDDED
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
help
Choose this option for open-source nVidia support.
......