Commit fd1f297b authored by Linus Torvalds

Merge tag 'drm-fixes-2019-03-22' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "i915, amdgpu, vmwgfx, exynos, nouveau and udl fixes.

  Seems to be lots of little minor ones for regressions in rc1, and some
  cleanups. The exynos one is the largest one, and is for a hw
  difference between exynos versions"

* tag 'drm-fixes-2019-03-22' of git://anongit.freedesktop.org/drm/drm:
  drm/nouveau/dmem: empty chunk do not have a buffer object associated with them.
  drm/nouveau/debugfs: Fix check of pm_runtime_get_sync failure
  drm/nouveau/dmem: Fix a NULL vs IS_ERR() check
  drm/nouveau/dmem: remove set but not used variable 'drm'
  drm/exynos/mixer: fix MIXER shadow registry synchronisation code
  drm/vmwgfx: Don't double-free the mode stored in par->set_mode
  drm/vmwgfx: Return 0 when gmrid::get_node runs out of ID's
  drm/amdgpu: fix invalid use of change_bit
  drm/amdgpu: revert "cleanup setting bulk_movable"
  drm/i915: Sanity check mmap length against object size
  drm/i915: Fix off-by-one in reporting hanging process
  drm/i915/bios: assume eDP is present on port A when there is no VBT
  drm/udl: use drm_gem_object_put_unlocked.
parents d92da1fb 8e078788
@@ -700,6 +700,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct amdgpu_vm_bo_base *bo_base, *tmp;
 	int r = 0;
 
+	vm->bulk_moveable &= list_empty(&vm->evicted);
+
 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
 		struct amdgpu_bo *bo = bo_base->bo;
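Note on the restored line: a bulk LRU move is only valid while the VM's BO list is undisturbed, so vm->bulk_moveable has to be dropped whenever anything sits on the evicted list; the list_empty() test folds that condition straight into the flag.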
@@ -742,7 +742,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
 	}
 
 	ring->vm_inv_eng = inv_eng - 1;
-	change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
+	vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
 
 	dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
 		 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
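Why the removed line was invalid: change_bit() operates on unsigned long words, so casting a u32 array slot to unsigned long * reads and writes past the element on 64-bit kernels, and it toggles rather than clears. A minimal standalone sketch (hypothetical userspace code, not the driver) of the difference:

#include <stdio.h>

int main(void)
{
	unsigned int vm_inv_engs = 0x06;	/* engines 1 and 2 available */
	unsigned int inv_eng = 1;		/* engine just claimed */

	/* The fix: clear the claimed engine's bit with a plain mask
	 * of the same 32-bit width as the array element. */
	vm_inv_engs &= ~(1u << inv_eng);
	printf("after clear:  0x%x\n", vm_inv_engs);	/* 0x4 */

	/* change_bit() would have toggled instead, which only clears
	 * because the bit happens to be set, and it dereferences an
	 * unsigned long: 8 bytes on 64-bit, overrunning a u32 slot. */
	vm_inv_engs ^= (1u << inv_eng);
	printf("after toggle: 0x%x\n", vm_inv_engs);	/* back to 0x6 */
	return 0;
}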
@@ -20,6 +20,7 @@
 #include "regs-vp.h"
 
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
@@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha)
 	mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
 }
 
-static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
+static bool mixer_is_synced(struct mixer_context *ctx)
 {
-	/* block update on vsync */
-	mixer_reg_writemask(ctx, MXR_STATUS, enable ?
-			MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
+	u32 base, shadow;
+
+	if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+	    ctx->mxr_ver == MXR_VER_128_0_0_184)
+		return !(mixer_reg_read(ctx, MXR_CFG) &
+			 MXR_CFG_LAYER_UPDATE_COUNT_MASK);
+
+	if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
+	    vp_reg_read(ctx, VP_SHADOW_UPDATE))
+		return false;
+
+	base = mixer_reg_read(ctx, MXR_CFG);
+	shadow = mixer_reg_read(ctx, MXR_CFG_S);
+	if (base != shadow)
+		return false;
+
+	base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
+	shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
+	if (base != shadow)
+		return false;
+
+	base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
+	shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
+	if (base != shadow)
+		return false;
+
+	return true;
+}
+
+static int mixer_wait_for_sync(struct mixer_context *ctx)
+{
+	ktime_t timeout = ktime_add_us(ktime_get(), 100000);
+
+	while (!mixer_is_synced(ctx)) {
+		usleep_range(1000, 2000);
+		if (ktime_compare(ktime_get(), timeout) > 0)
+			return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+static void mixer_disable_sync(struct mixer_context *ctx)
+{
+	mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
+}
+
+static void mixer_enable_sync(struct mixer_context *ctx)
+{
+	if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+	    ctx->mxr_ver == MXR_VER_128_0_0_184)
+		mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+	mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
 	if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
-		vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ?
-			VP_SHADOW_UPDATE_ENABLE : 0);
+		vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE);
 }
 
 static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
@@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
 	spin_lock_irqsave(&ctx->reg_slock, flags);
 
-	vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
 	/* interlace or progressive scan mode */
 	val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
 	vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
@@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
 	vp_regs_dump(ctx);
 }
 
-static void mixer_layer_update(struct mixer_context *ctx)
-{
-	mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
-}
-
 static void mixer_graph_buffer(struct mixer_context *ctx,
 			       struct exynos_drm_plane *plane)
 {
@@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
 	mixer_cfg_layer(ctx, win, priority, true);
 	mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha);
 
-	/* layer update mandatory for mixer 16.0.33.0 */
-	if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
-	    ctx->mxr_ver == MXR_VER_128_0_0_184)
-		mixer_layer_update(ctx);
-
 	spin_unlock_irqrestore(&ctx->reg_slock, flags);
 
 	mixer_regs_dump(ctx);
@@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
 static irqreturn_t mixer_irq_handler(int irq, void *arg)
 {
 	struct mixer_context *ctx = arg;
-	u32 val, base, shadow;
+	u32 val;
 
 	spin_lock(&ctx->reg_slock);
@@ -723,27 +760,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 		val &= ~MXR_INT_STATUS_VSYNC;
 
 		/* interlace scan need to check shadow register */
-		if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
-			if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
-			    vp_reg_read(ctx, VP_SHADOW_UPDATE))
-				goto out;
-
-			base = mixer_reg_read(ctx, MXR_CFG);
-			shadow = mixer_reg_read(ctx, MXR_CFG_S);
-			if (base != shadow)
-				goto out;
+		if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
+		    && !mixer_is_synced(ctx))
+			goto out;
 
-			base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
-			shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
-			if (base != shadow)
-				goto out;
-
-			base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
-			shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
-			if (base != shadow)
-				goto out;
-		}
 		drm_crtc_handle_vblank(&ctx->crtc->base);
 	}
@@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
 static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
 {
-	struct mixer_context *mixer_ctx = crtc->ctx;
+	struct mixer_context *ctx = crtc->ctx;
 
-	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
+	if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
 		return;
 
-	mixer_vsync_set_update(mixer_ctx, false);
+	if (mixer_wait_for_sync(ctx))
+		dev_err(ctx->dev, "timeout waiting for VSYNC\n");
+	mixer_disable_sync(ctx);
 }
 
 static void mixer_update_plane(struct exynos_drm_crtc *crtc,
@@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
 	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
 
-	mixer_vsync_set_update(mixer_ctx, true);
+	mixer_enable_sync(mixer_ctx);
 	exynos_crtc_handle_event(crtc);
 }
@@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 	exynos_drm_pipe_clk_enable(crtc, true);
 
-	mixer_vsync_set_update(ctx, false);
+	mixer_disable_sync(ctx);
 
 	mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
@@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 	mixer_commit(ctx);
 
-	mixer_vsync_set_update(ctx, true);
+	mixer_enable_sync(ctx);
 
 	set_bit(MXR_BIT_POWERED, &ctx->flags);
 }
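The rework splits the old mixer_vsync_set_update(ctx, true/false) pair into explicit enable/disable helpers, and atomic_begin now waits until the hardware has consumed the previous shadow update before blocking new ones. The wait is the standard poll-with-deadline idiom; a self-contained userspace sketch of the same shape (stubbed condition, assumed names, not the driver code):

#include <errno.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

/* Stand-in for mixer_is_synced(): the driver compares each base
 * register with its shadow copy, or a hardware update counter. */
static bool hw_is_synced(void) { return true; }

/* Poll for up to ~100 ms, sleeping between reads, mirroring
 * mixer_wait_for_sync() built on ktime and usleep_range(). */
static int wait_for_sync(void)
{
	struct timespec t0, t1;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	while (!hw_is_synced()) {
		usleep(1000);
		clock_gettime(CLOCK_MONOTONIC, &t1);
		if ((t1.tv_sec - t0.tv_sec) * 1000000000L +
		    (t1.tv_nsec - t0.tv_nsec) > 100000000L)
			return -ETIMEDOUT;
	}
	return 0;
}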
@@ -1734,8 +1734,13 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	 * pages from.
 	 */
 	if (!obj->base.filp) {
-		i915_gem_object_put(obj);
-		return -ENXIO;
+		addr = -ENXIO;
+		goto err;
+	}
+
+	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
+		addr = -EINVAL;
+		goto err;
 	}
 
 	addr = vm_mmap(obj->base.filp, 0, args->size,
@@ -1749,8 +1754,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		struct vm_area_struct *vma;
 
 		if (down_write_killable(&mm->mmap_sem)) {
-			i915_gem_object_put(obj);
-			return -EINTR;
+			addr = -EINTR;
+			goto err;
 		}
 		vma = find_vma(mm, addr);
 		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
@@ -1768,12 +1773,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	i915_gem_object_put(obj);
 
 	args->addr_ptr = (u64)addr;
 	return 0;
+
+err:
+	i915_gem_object_put(obj);
+	return addr;
 }
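The added range_overflows() guard is the substance of the fix: validating args->offset + args->size <= obj->base.size directly can wrap and pass. i915 keeps the helper in i915_utils.h as a type-checked macro; its overflow-safe shape is roughly the following (a sketch, not the exact macro):

#include <stdbool.h>
#include <stdint.h>

/* True if [start, start + size) does not fit inside [0, max).
 * Rearranged so no addition can overflow: size is compared
 * against max - start only after start < max is established. */
static bool range_overflows(uint64_t start, uint64_t size, uint64_t max)
{
	return start >= max || size > max - start;
}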
@@ -1721,7 +1721,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
 			i915_error_generate_code(error, engines));
 	if (engines) {
 		/* Just show the first executing process, more is confusing */
-		i = ffs(engines);
+		i = __ffs(engines);
 		len += scnprintf(error->error_msg + len,
 				 sizeof(error->error_msg) - len,
 				 ", in %s [%d]",
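The ffs()/__ffs() swap fixes a genuine off-by-one: ffs() returns 1-based bit positions (0 meaning no bit set), while __ffs() returns the 0-based index of the lowest set bit and is only defined for non-zero input, which the if (engines) guard guarantees here. Used as an engine index, ffs() named the engine after the hanging one. A quick userspace illustration, using the compiler builtin that matches __ffs():

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned long engines = 0x4;	/* only engine 2 is executing */

	printf("ffs:   %d\n", ffs((int)engines));	/* 3: 1-based */
	printf("__ffs: %d\n", __builtin_ctzl(engines));	/* 2: 0-based */
	return 0;
}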
@@ -1673,6 +1673,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
 		info->supports_dvi = (port != PORT_A && port != PORT_E);
 		info->supports_hdmi = info->supports_dvi;
 		info->supports_dp = (port != PORT_E);
+		info->supports_edp = (port == PORT_A);
 	}
 }
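The rationale, as the one-line default suggests: with no VBT at all the driver previously assumed no eDP on any port, losing the panel entirely; port A is the conventional eDP attachment point on these platforms, so presence there is the safer assumption when the BIOS provides nothing.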
@@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
 	}
 
 	ret = pm_runtime_get_sync(drm->dev);
-	if (IS_ERR_VALUE(ret) && ret != -EACCES)
+	if (ret < 0 && ret != -EACCES)
 		return ret;
 
 	ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
 	pm_runtime_put_autosuspend(drm->dev);
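IS_ERR_VALUE() tests whether a pointer-sized value falls in the error range and is misleading when applied to a plain int; pm_runtime_get_sync() just returns a negative errno on failure (and 0 or 1 on success), so ret < 0 is the idiomatic check. A hedged sketch of the usual calling pattern, not this driver's exact code:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int do_powered_work(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* The usage count is raised even on failure, so many
		 * callers drop it again with pm_runtime_put_noidle(). */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... the device is guaranteed resumed here ... */

	pm_runtime_put_autosuspend(dev);
	return 0;
}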
@@ -100,12 +100,10 @@ static void
 nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
 {
 	struct nouveau_dmem_chunk *chunk;
-	struct nouveau_drm *drm;
 	unsigned long idx;
 
 	chunk = (void *)hmm_devmem_page_get_drvdata(page);
 	idx = page_to_pfn(page) - chunk->pfn_first;
-	drm = chunk->drm;
 
 	/*
 	 * FIXME:
@@ -456,11 +454,6 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
 		/* FIXME handle pin failure */
 		WARN_ON(ret);
 	}
-	list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
-		ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
-		/* FIXME handle pin failure */
-		WARN_ON(ret);
-	}
 	mutex_unlock(&drm->dmem->mutex);
 }
@@ -479,9 +472,6 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
 	list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
 		nouveau_bo_unpin(chunk->bo);
 	}
-	list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
-		nouveau_bo_unpin(chunk->bo);
-	}
 	mutex_unlock(&drm->dmem->mutex);
 }
@@ -623,7 +613,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 	 */
 	drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
 					   device, size);
-	if (drm->dmem->devmem == NULL) {
+	if (IS_ERR(drm->dmem->devmem)) {
 		kfree(drm->dmem);
 		drm->dmem = NULL;
 		return;
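The hmm_devmem_add() fix is the classic NULL-vs-ERR_PTR confusion: functions that report failure through ERR_PTR() never return NULL, so a NULL test silently passes an error pointer along to a later dereference. The canonical consumer pattern, with foo_create() as a hypothetical ERR_PTR-style constructor:

#include <linux/err.h>

struct foo;
struct foo *foo_create(void);	/* hypothetical, returns ERR_PTR(-errno) */

static int setup(void)
{
	struct foo *foo = foo_create();

	/* IS_ERR() matches the error-pointer range; a plain NULL
	 * check would wave ERR_PTR(-ENOMEM) straight through. */
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	/* ... use foo ... */
	return 0;
}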
@@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
 
 out:
-	drm_gem_object_put(&gobj->base);
+	drm_gem_object_put_unlocked(&gobj->base);
 unlock:
 	mutex_unlock(&udl->gem_lock);
 	return ret;
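In this era of the DRM API, drm_gem_object_put() is the locked flavor that expects the caller to hold dev->struct_mutex; udl_gem_mmap() holds only udl->gem_lock, so drm_gem_object_put_unlocked() is the correct way to drop the reference here.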
@@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info)
 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
 	};
-	struct drm_display_mode *old_mode;
 	struct drm_display_mode *mode;
 	int ret;
 
-	old_mode = par->set_mode;
 	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
 	if (!mode) {
 		DRM_ERROR("Could not create new fb mode.\n");
@@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info)
 	mode->vdisplay = var->yres;
 	vmw_guess_mode_timing(mode);
 
-	if (old_mode && drm_mode_equal(old_mode, mode)) {
-		drm_mode_destroy(vmw_priv->dev, mode);
-		mode = old_mode;
-		old_mode = NULL;
-	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
+	if (!vmw_kms_validate_mode_vram(vmw_priv,
 				mode->hdisplay *
 				DIV_ROUND_UP(var->bits_per_pixel, 8),
 				mode->vdisplay)) {
@@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info)
 	schedule_delayed_work(&par->local_work, 0);
 
 out_unlock:
-	if (old_mode)
-		drm_mode_destroy(vmw_priv->dev, old_mode);
+	if (par->set_mode)
+		drm_mode_destroy(vmw_priv->dev, par->set_mode);
 	par->set_mode = mode;
 
 	mutex_unlock(&par->bo_mutex);
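The underlying bug was an aliased owner: old_mode was a second name for the mode already stored in par->set_mode, and the same allocation could be destroyed through both names. Keying the final destroy off par->set_mode itself leaves exactly one owning pointer. A tiny sketch (hypothetical types, not the driver code) of the safe replace-and-free shape:

#include <stdlib.h>

struct mode { int hdisplay, vdisplay; };
struct par  { struct mode *set_mode; };

/* Install a new mode, releasing the previous one exactly once.
 * Keeping a second pointer to par->set_mode and freeing through
 * both is how double-frees like the one fixed above creep in. */
static void par_set_mode(struct par *par, struct mode *mode)
{
	free(par->set_mode);	/* free(NULL) is a harmless no-op */
	par->set_mode = mode;
}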
@@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
-		return id;
+		return (id != -ENOMEM ? 0 : id);
 
 	spin_lock(&gman->lock);
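The rationale: ida_alloc_max() distinguishes ID exhaustion (-ENOSPC) from genuine memory-allocation failure (-ENOMEM), and TTM treats a 0 return from get_node with no node attached as "out of space, evict and retry", while a negative errno aborts the allocation outright. Mapping every non-ENOMEM failure to 0 therefore lets TTM reclaim GMR IDs through eviction instead of failing userspace.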