Commit c60f7d5a authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Pretty minor set of fixes for radeon, ttm and vmwgfx.  The ttm ones
  are a regression and an oops seen on server chipsets"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/vmwgfx: Fix a surface reference corner-case in legacy emulation mode
  drm/radeon/cik: properly set compute ring status on disable
  drm/radeon/cik: stop the sdma engines in the enable() function
  drm/radeon/cik: properly set sdma ring status on disable
  drm/radeon: fix runpm disabling on non-PX harder
  drm/ttm: don't oops if no invalidate_caches()
  drm/ttm: Work around performance regression with VM_PFNMAP
parents c14c06b7 f042cc4a
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4134,8 +4134,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
 {
         if (enable)
                 WREG32(CP_MEC_CNTL, 0);
-        else
+        else {
                 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+                rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+                rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+        }
         udelay(50);
 }
 
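Why those two added lines matter: radeon's shared ring code gates further work on each ring's ready flag, so halting the compute micro-engines while the flags stay true lets later suspend or teardown paths submit to dead hardware. Below is a minimal standalone sketch of that guard pattern, with hypothetical names (ring_submit, engine_enable) rather than the driver's actual API:

    /* Standalone sketch, not radeon code: a halted engine's ring must
     * drop ->ready so callers refuse work instead of touching dead hw. */
    #include <stdbool.h>
    #include <stdio.h>

    struct ring {
            const char *name;
            bool ready;                     /* set on init, cleared on halt */
    };

    /* Hypothetical submit path: all ring users check ->ready first. */
    static int ring_submit(struct ring *r)
    {
            if (!r->ready) {
                    printf("%s: not ready, refusing work\n", r->name);
                    return -1;              /* the kernel would return an errno */
            }
            printf("%s: work submitted\n", r->name);
            return 0;
    }

    /* Mirrors the fix: disabling the engine also clears ready. */
    static void engine_enable(struct ring *r, bool enable)
    {
            if (!enable)
                    r->ready = false;       /* the lines the patch adds */
    }

    int main(void)
    {
            struct ring cp1 = { "cp1", true };

            engine_enable(&cp1, false);     /* halt, e.g. on suspend */
            ring_submit(&cp1);              /* refused rather than oopsing */
            return 0;
    }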
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -264,6 +264,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
                 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
                 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
         }
+        rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+        rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
 }
 
 /**
@@ -291,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
         u32 me_cntl, reg_offset;
         int i;
 
+        if (enable == false) {
+                cik_sdma_gfx_stop(rdev);
+                cik_sdma_rlc_stop(rdev);
+        }
+
         for (i = 0; i < 2; i++) {
                 if (i == 0)
                         reg_offset = SDMA0_REGISTER_OFFSET;
@@ -420,10 +427,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
         if (!rdev->sdma_fw)
                 return -EINVAL;
 
-        /* stop the gfx rings and rlc compute queues */
-        cik_sdma_gfx_stop(rdev);
-        cik_sdma_rlc_stop(rdev);
-
         /* halt the MEs */
         cik_sdma_enable(rdev, false);
 
@@ -492,9 +495,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
  */
 void cik_sdma_fini(struct radeon_device *rdev)
 {
-        /* stop the gfx rings and rlc compute queues */
-        cik_sdma_gfx_stop(rdev);
-        cik_sdma_rlc_stop(rdev);
-
         /* halt the MEs */
         cik_sdma_enable(rdev, false);
         radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
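The two sdma patches above are a small refactor with a matching goal: rather than trusting every caller (cik_sdma_load_microcode, cik_sdma_fini, ...) to stop the gfx and rlc queues before halting the micro-engines, cik_sdma_enable(rdev, false) now does the stopping itself and the callers drop their copies. A standalone sketch of that centralize-the-teardown pattern, hypothetical names only:

    /* Sketch, not radeon code: fold prerequisite stop steps into the
     * single enable(false) entry point so no caller can forget them. */
    #include <stdbool.h>
    #include <stdio.h>

    static void queues_stop(void) { puts("queues stopped"); }
    static void engine_halt(void) { puts("engine halted"); }

    static void engine_enable(bool enable)
    {
            if (!enable) {
                    queues_stop();  /* was duplicated in every caller */
                    engine_halt();
            }
    }

    int main(void)
    {
            engine_enable(false);   /* fini/load paths now just call this */
            return 0;
    }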
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -33,6 +33,13 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_is_px(void);
+#else
+static inline bool radeon_is_px(void) { return false; }
+#endif
+
 /**
  * radeon_driver_unload_kms - Main unload function for KMS.
  *
@@ -130,7 +137,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
                                 "Error during ACPI methods call\n");
         }
 
-        if (radeon_runtime_pm != 0) {
+        if ((radeon_runtime_pm == 1) ||
+            ((radeon_runtime_pm == -1) && radeon_is_px())) {
                 pm_runtime_use_autosuspend(dev->dev);
                 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
                 pm_runtime_set_active(dev->dev);
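For context: radeon_runtime_pm is a tri-state module parameter (1 forces runtime PM on, 0 disables it, -1 means auto, i.e. only on PX hybrid-graphics laptops). The old test, radeon_runtime_pm != 0, also matched -1 on non-PX boards, which is what "fix runpm disabling on non-PX harder" corrects. A standalone model of the new check, with is_px standing in for radeon_is_px():

    /* Standalone model of the tri-state check the patch installs. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool is_px;      /* stands in for radeon_is_px() */

    static bool runtime_pm_wanted(int param)
    {
            /* old buggy test was (param != 0): also true for -1 on non-PX */
            return (param == 1) || (param == -1 && is_px);
    }

    int main(void)
    {
            is_px = false;
            printf("auto, non-PX: %d\n", runtime_pm_wanted(-1));    /* 0 */
            is_px = true;
            printf("auto, PX:     %d\n", runtime_pm_wanted(-1));    /* 1 */
            return 0;
    }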
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 moved:
         if (bo->evicted) {
-                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
-                if (ret)
-                        pr_err("Can not flush read caches\n");
+                if (bdev->driver->invalidate_caches) {
+                        ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+                        if (ret)
+                                pr_err("Can not flush read caches\n");
+                }
                 bo->evicted = false;
         }
 
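The ttm oops fix is the classic optional-callback pattern: invalidate_caches is not implemented by every TTM driver, so the function pointer must be tested before the call. A standalone sketch with illustrative names, not the TTM structures:

    /* Sketch of the fix's shape: treat a driver hook as optional and
     * test the pointer before calling it. */
    #include <stdio.h>

    struct driver_ops {
            int (*invalidate_caches)(unsigned int placement); /* may be NULL */
    };

    static int handle_move(const struct driver_ops *ops, unsigned int placement)
    {
            if (ops->invalidate_caches) {   /* the check the patch adds */
                    int ret = ops->invalidate_caches(placement);
                    if (ret)
                            fprintf(stderr, "Can not flush read caches\n");
            }
            return 0;
    }

    int main(void)
    {
            struct driver_ops no_hook = { 0 };      /* driver without the hook */

            return handle_move(&no_hook, 0);        /* hook skipped, no oops */
    }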
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -339,11 +339,13 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
         vma->vm_private_data = bo;
 
         /*
-         * PFNMAP is faster than MIXEDMAP due to reduced page
-         * administration. So use MIXEDMAP only if private VMA, where
-         * we need to support COW.
+         * We'd like to use VM_PFNMAP on shared mappings, where
+         * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+         * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+         * bad for performance. Until that has been sorted out, use
+         * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
          */
-        vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+        vma->vm_flags |= VM_MIXEDMAP;
         vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
         return 0;
 out_unref:
@@ -359,7 +361,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 
         vma->vm_ops = &ttm_bo_vm_ops;
         vma->vm_private_data = ttm_bo_reference(bo);
-        vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+        vma->vm_flags |= VM_MIXEDMAP;
         vma->vm_flags |= VM_IO | VM_DONTEXPAND;
         return 0;
 }
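Background on the two flags: VM_PFNMAP marks a VMA as raw page-frame mappings with no struct page bookkeeping, which is why it was preferred for shared mappings, while VM_MIXEDMAP lets the fault handler mix struct-page-backed and raw PFN mappings (needed for COW on private mappings). The patch simply stops taking the PFNMAP fast path until the PAT/write-combine interaction is understood. A toy model of the policy change:

    /* Toy model of the mapping-type policy this patch reverts to. */
    #include <stdbool.h>
    #include <stdio.h>

    enum map_type { MIXEDMAP, PFNMAP };

    /* old policy: shared mappings took the cheaper PFNMAP path */
    static enum map_type pick_old(bool shared)
    {
            return shared ? PFNMAP : MIXEDMAP;
    }

    /* new policy: always MIXEDMAP until the PAT issue is sorted out */
    static enum map_type pick_new(bool shared)
    {
            (void)shared;
            return MIXEDMAP;
    }

    int main(void)
    {
            printf("shared, old policy: %s\n",
                   pick_old(true) == PFNMAP ? "PFNMAP" : "MIXEDMAP");
            printf("shared, new policy: %s\n",
                   pick_new(true) == PFNMAP ? "PFNMAP" : "MIXEDMAP");
            return 0;
    }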
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -830,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
         if (unlikely(ret != 0))
                 goto out_unlock;
 
+        /*
+         * A gb-aware client referencing a shared surface will
+         * expect a backup buffer to be present.
+         */
+        if (dev_priv->has_mob && req->shareable) {
+                uint32_t backup_handle;
+
+                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+                                            res->backup_size,
+                                            true,
+                                            &backup_handle,
+                                            &res->backup);
+                if (unlikely(ret != 0)) {
+                        vmw_resource_unreference(&res);
+                        goto out_unlock;
+                }
+        }
+
         tmp = vmw_resource_reference(&srf->res);
         ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
                                     req->shareable, VMW_RES_SURFACE,
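The vmwgfx fix follows a common allocate-or-unwind shape: when a shareable surface is defined on guest-backed-object (has_mob) hardware, a backup buffer is allocated up front, and on failure the reference taken on the resource is dropped before bailing out. A standalone sketch of that error-unwind pattern, hypothetical names throughout:

    /* Sketch of the alloc-or-unwind shape, not the vmwgfx API. */
    #include <stdio.h>
    #include <stdlib.h>

    struct resource { int refs; };

    static void res_unref(struct resource **res)
    {
            if (--(*res)->refs == 0)
                    free(*res);
            *res = NULL;
    }

    static int backup_alloc(int fail)
    {
            return fail ? -12 : 0;  /* -12 plays the role of -ENOMEM */
    }

    static int define_surface(int need_backup, int fail)
    {
            struct resource *res = calloc(1, sizeof(*res));
            if (!res)
                    return -12;

            res->refs = 1;
            if (need_backup) {
                    int ret = backup_alloc(fail);
                    if (ret) {
                            res_unref(&res);   /* like vmw_resource_unreference */
                            return ret;        /* like goto out_unlock */
                    }
            }
            /* ... rest of initialization ... */
            res_unref(&res);
            return 0;
    }

    int main(void)
    {
            printf("ok:   %d\n", define_surface(1, 0));
            printf("fail: %d\n", define_surface(1, 1));
            return 0;
    }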