Commit 1a589c04 authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.11-rc6' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "This is just mostly stuff that missed rc5, from vmwgfx and msm
  drivers"

* tag 'drm-fixes-for-v4.11-rc6' of git://people.freedesktop.org/~airlied/linux:
  drm/msm: Make sure to detach the MMU during GPU cleanup
  drm/msm/hdmi: redefinitions of macros not required
  drm/msm/mdp5: Update SSPP_MAX value
  drm/msm/dsi: Fix bug in dsi_mgr_phy_enable
  drm/msm: Don't allow zero sized buffer objects
  drm/msm: Fix wrong pointer check in a5xx_destroy
  drm/msm: adreno: fix build error without debugfs
  drm/vmwgfx: fix integer overflow in vmw_surface_define_ioctl()
  drm/vmwgfx: Remove getparam error message
  drm/ttm: Avoid calling drm_ht_remove from atomic context
  drm/ttm, drm/vmwgfx: Relax permission checking when opening surfaces
  drm/vmwgfx: avoid calling vzalloc with a 0 size in vmw_get_cap_3d_ioctl()
  drm/vmwgfx: NULL pointer dereference in vmw_surface_define_ioctl()
  drm/vmwgfx: Type-check lookups of fence objects
parents 08e4e0d0 130e35e4
-/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 	}
 
 	if (a5xx_gpu->gpmu_bo) {
-		if (a5xx_gpu->gpmu_bo)
+		if (a5xx_gpu->gpmu_iova)
 			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
 		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
 	}
@@ -860,7 +860,9 @@ static const struct adreno_gpu_funcs funcs = {
 		.idle = a5xx_idle,
 		.irq = a5xx_irq,
 		.destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
 		.show = a5xx_show,
+#endif
 	},
 	.get_timestamp = a5xx_get_timestamp,
 };
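
The a5xx_show() function is itself compiled only when CONFIG_DEBUG_FS is set, so an unguarded .show initializer referenced an undefined symbol in !DEBUG_FS builds. A minimal standalone sketch of the pattern, with hypothetical foo_* names (not the driver's types):

	/* Hypothetical example of guarding an optional debugfs callback. */
	struct foo_funcs {
		void (*destroy)(void);
		void (*show)(void);	/* debugfs-only hook */
	};

	static void foo_destroy(void) { }

	#ifdef CONFIG_DEBUG_FS
	static void foo_show(void) { }	/* compiled only with debugfs */
	#endif

	static const struct foo_funcs funcs = {
		.destroy = foo_destroy,
	#ifdef CONFIG_DEBUG_FS
		.show = foo_show,	/* guard must match the definition */
	#endif
	};
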
@@ -418,18 +418,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	return 0;
 }
 
-void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 {
-	if (gpu->memptrs_bo) {
-		if (gpu->memptrs)
-			msm_gem_put_vaddr(gpu->memptrs_bo);
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
+	if (adreno_gpu->memptrs_bo) {
+		if (adreno_gpu->memptrs)
+			msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
+
+		if (adreno_gpu->memptrs_iova)
+			msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
+
+		drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
+	}
+
+	release_firmware(adreno_gpu->pm4);
+	release_firmware(adreno_gpu->pfp);
 
-		if (gpu->memptrs_iova)
-			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+	msm_gpu_cleanup(gpu);
 
-		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+	if (gpu->aspace) {
+		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
+			iommu_ports, ARRAY_SIZE(iommu_ports));
+		msm_gem_address_space_destroy(gpu->aspace);
 	}
-
-	release_firmware(gpu->pm4);
-	release_firmware(gpu->pfp);
-
-	msm_gpu_cleanup(&gpu->base);
 }
@@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id,
 			}
 		}
 	} else {
-		msm_dsi_host_reset_phy(mdsi->host);
+		msm_dsi_host_reset_phy(msm_dsi->host);
 		ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
 		if (ret)
 			return ret;
@@ -18,13 +18,6 @@
 #include <linux/hdmi.h>
 #include "hdmi.h"
 
-/* Supported HDMI Audio channels */
-#define MSM_HDMI_AUDIO_CHANNEL_2	0
-#define MSM_HDMI_AUDIO_CHANNEL_4	1
-#define MSM_HDMI_AUDIO_CHANNEL_6	2
-#define MSM_HDMI_AUDIO_CHANNEL_8	3
-
 /* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
 static int nchannels[] = { 2, 4, 6, 8 };
@@ -18,7 +18,8 @@
 #ifndef __MDP5_PIPE_H__
 #define __MDP5_PIPE_H__
 
-#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+/* TODO: Add SSPP_MAX in mdp5.xml.h */
+#define SSPP_MAX	(SSPP_CURSOR1 + 1)
 
 /* represents a hw pipe, which is dynamically assigned to a plane */
 struct mdp5_hw_pipe {
@@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 
 	size = PAGE_ALIGN(size);
 
+	/* Disallow zero sized objects as they make the underlying
+	 * infrastructure grumpy
+	 */
+	if (size == 0)
+		return ERR_PTR(-EINVAL);
+
 	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
 	if (ret)
 		goto fail;
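
PAGE_ALIGN() rounds a request up to a page boundary but maps 0 to 0, so a zero-byte request would otherwise reach the GEM core as a zero-page object. A self-contained toy of the new guard (PAGE_SIZE value assumed here, not msm code):

	#include <stdio.h>
	#include <errno.h>

	#define PAGE_SIZE 4096u
	#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	static int gem_new(unsigned int size)
	{
		size = PAGE_ALIGN(size);
		if (size == 0)		/* mirrors the check added above */
			return -EINVAL;
		return 0;
	}

	int main(void)
	{
		/* prints "-22 0": zero bytes rejected, one byte rounds to a page */
		printf("%d %d\n", gem_new(0), gem_new(1));
		return 0;
	}
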
@@ -706,9 +706,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
 		msm_ringbuffer_destroy(gpu->rb);
 	}
 
-	if (gpu->aspace)
-		msm_gem_address_space_destroy(gpu->aspace);
-
 	if (gpu->fctx)
 		msm_fence_context_free(gpu->fctx);
 }
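
Together with the adreno_gpu.c hunk above, this moves address-space teardown out of the generic msm_gpu_cleanup() and into adreno_gpu_cleanup(), which can detach the MMU before destroying the address space. A hedged sketch of that ordering constraint (hypothetical types, not the driver's API):

	/* Hypothetical model: translations must stop before the tables go away. */
	struct mmu;
	struct mmu_funcs { void (*detach)(struct mmu *mmu); };
	struct mmu { const struct mmu_funcs *funcs; };
	struct aspace { struct mmu *mmu; };

	static void aspace_destroy(struct aspace *as)
	{
		/* frees translation state; only safe once nothing uses it */
	}

	static void gpu_cleanup(struct aspace *as)
	{
		if (as) {
			as->mmu->funcs->detach(as->mmu);	/* detach first */
			aspace_destroy(as);			/* then destroy */
		}
	}
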
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 	if (unlikely(ret != 0))
 		goto out_err0;
 
-	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 	if (unlikely(ret != 0))
 		goto out_err1;
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
 		       struct ttm_base_object *base,
-		       enum ttm_ref_type ref_type, bool *existed)
+		       enum ttm_ref_type ref_type, bool *existed,
+		       bool require_existed)
 {
 	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
 	struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 		}
 		rcu_read_unlock();
 
+		if (require_existed)
+			return -EPERM;
+
 		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
 					   false, false);
 		if (unlikely(ret != 0))
@@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 		ttm_ref_object_release(&ref->kref);
 	}
 
+	spin_unlock(&tfile->lock);
 	for (i = 0; i < TTM_REF_NUM; ++i)
 		drm_ht_remove(&tfile->ref_hash[i]);
 
-	spin_unlock(&tfile->lock);
 	ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 	*p_tdev = NULL;
 
-	spin_lock(&tdev->object_lock);
 	drm_ht_remove(&tdev->object_hash);
-	spin_unlock(&tdev->object_lock);
 
 	kfree(tdev);
 }
@@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
 	prime = (struct ttm_prime_object *) dma_buf->priv;
 	base = &prime->base;
 	*handle = base->hash.key;
-	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 
 	dma_buf_put(dma_buf);
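
Callers that want the old create-if-missing behaviour pass require_existed=false; the surface-reference path below passes true for untrusted clients, turning the call into a lookup-only operation that fails with -EPERM when the caller never opened the object. A self-contained toy model of those semantics (not the TTM implementation):

	#include <stdbool.h>
	#include <stdio.h>
	#include <errno.h>

	struct ref_table { bool has_ref; int refcount; };

	static int ref_object_add(struct ref_table *t, bool *existed,
				  bool require_existed)
	{
		if (t->has_ref) {		/* identical ref already held */
			t->refcount++;
			if (existed)
				*existed = true;
			return 0;
		}
		if (require_existed)		/* refuse to create a new ref */
			return -EPERM;
		t->has_ref = true;
		t->refcount = 1;
		if (existed)
			*existed = false;
		return 0;
	}

	int main(void)
	{
		struct ref_table t = { false, 0 };

		printf("%d\n", ref_object_add(&t, NULL, true));  /* -EPERM */
		printf("%d\n", ref_object_add(&t, NULL, false)); /* 0: created */
		printf("%d\n", ref_object_add(&t, NULL, true));  /* 0: exists now */
		return 0;
	}
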
@@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
 }
 
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+	if (!base) {
+		pr_err("Invalid fence object handle 0x%08lx.\n",
+		       (unsigned long)handle);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (base->refcount_release != vmw_user_fence_base_release) {
+		pr_err("Invalid fence object handle 0x%08lx.\n",
+		       (unsigned long)handle);
+		ttm_base_object_unref(&base);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return base;
+}
+
 int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv)
 {
@@ -726,13 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
 		arg->kernel_cookie = jiffies + wait_timeout;
 	}
 
-	base = ttm_base_object_lookup(tfile, arg->handle);
-	if (unlikely(base == NULL)) {
-		printk(KERN_ERR "Wait invalid fence object handle "
-		       "0x%08lx.\n",
-		       (unsigned long)arg->handle);
-		return -EINVAL;
-	}
+	base = vmw_fence_obj_lookup(tfile, arg->handle);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
 
 	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
@@ -771,13 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	base = ttm_base_object_lookup(tfile, arg->handle);
-	if (unlikely(base == NULL)) {
-		printk(KERN_ERR "Fence signaled invalid fence object handle "
-		       "0x%08lx.\n",
-		       (unsigned long)arg->handle);
-		return -EINVAL;
-	}
+	base = vmw_fence_obj_lookup(tfile, arg->handle);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
 
 	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 	fman = fman_from_fence(fence);
@@ -1024,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 		(struct drm_vmw_fence_event_arg *) data;
 	struct vmw_fence_obj *fence = NULL;
 	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	struct ttm_object_file *tfile = vmw_fp->tfile;
 	struct drm_vmw_fence_rep __user *user_fence_rep =
 		(struct drm_vmw_fence_rep __user *)(unsigned long)
 		arg->fence_rep;
@@ -1037,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 	 */
 	if (arg->handle) {
 		struct ttm_base_object *base =
-			ttm_base_object_lookup_for_ref(dev_priv->tdev,
-						       arg->handle);
-
-		if (unlikely(base == NULL)) {
-			DRM_ERROR("Fence event invalid fence object handle "
-				  "0x%08lx.\n",
-				  (unsigned long)arg->handle);
-			return -EINVAL;
-		}
+			vmw_fence_obj_lookup(tfile, arg->handle);
+
+		if (IS_ERR(base))
+			return PTR_ERR(base);
+
 		fence = &(container_of(base, struct vmw_user_fence,
 				       base)->fence);
 		(void) vmw_fence_obj_reference(fence);
 
 		if (user_fence_rep != NULL) {
-			bool existed;
-
 			ret = ttm_ref_object_add(vmw_fp->tfile, base,
-						 TTM_REF_USAGE, &existed);
+						 TTM_REF_USAGE, NULL, false);
 			if (unlikely(ret != 0)) {
 				DRM_ERROR("Failed to reference a fence "
 					  "object.\n");
@@ -1097,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 	return 0;
 out_no_create:
 	if (user_fence_rep != NULL)
-		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-					  handle, TTM_REF_USAGE);
+		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
 out_no_ref_obj:
 	vmw_fence_obj_unreference(&fence);
 	return ret;
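
The new helper rejects handles whose release hook isn't vmw_user_fence_base_release, so a handle to some other TTM object type can no longer be container_of()'d into a vmw_user_fence. A standalone toy of that pointer-comparison type check (names are illustrative, not the vmwgfx code):

	#include <stdio.h>
	#include <stddef.h>

	struct base_object {
		void (*refcount_release)(struct base_object **p);
	};

	static void fence_release(struct base_object **p)  { *p = NULL; }
	static void buffer_release(struct base_object **p) { *p = NULL; }

	/* Returns the object only if its release hook proves it is a fence. */
	static struct base_object *fence_lookup(struct base_object *base)
	{
		if (!base || base->refcount_release != fence_release)
			return NULL;
		return base;
	}

	int main(void)
	{
		struct base_object fence  = { fence_release };
		struct base_object buffer = { buffer_release };

		printf("fence:  %d\n", fence_lookup(&fence)  != NULL); /* 1 */
		printf("buffer: %d\n", fence_lookup(&buffer) != NULL); /* 0 */
		return 0;
	}
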
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		param->value = dev_priv->has_dx;
 		break;
 	default:
-		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
-			  param->param);
 		return -EINVAL;
 	}
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
 	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
-	if (unlikely(arg->pad64 != 0)) {
+	if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
 		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
 		return -EINVAL;
 	}
@@ -589,7 +589,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 		return ret;
 
 	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
-				 TTM_REF_SYNCCPU_WRITE, &existed);
+				 TTM_REF_SYNCCPU_WRITE, &existed, false);
 	if (ret != 0 || existed)
 		ttm_bo_synccpu_write_release(&user_bo->dma.base);
@@ -773,7 +773,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 	*handle = user_bo->prime.base.hash.key;
 
 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
-				  TTM_REF_USAGE, NULL);
+				  TTM_REF_USAGE, NULL, false);
 }
 
 /*
@@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		128;
 
 	num_sizes = 0;
-	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+			return -EINVAL;
 		num_sizes += req->mip_levels[i];
+	}
 
-	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
-	    DRM_VMW_MAX_MIP_LEVELS)
+	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+	    num_sizes == 0)
 		return -EINVAL;
 
 	size = vmw_user_surface_size + 128 +
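
Each req->mip_levels[i] is user-controlled, so without the per-face bound the 32-bit num_sizes sum could wrap, pass the upper-bound check, and leave the later allocation sized from the wrapped value. A self-contained illustration of the wrap (the values are hypothetical, not vmwgfx code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t num_sizes = 0;
		/* Two hypothetical attacker-chosen faces that sum to 2^32. */
		uint32_t mip_levels[2] = { 0x80000000u, 0x80000000u };

		for (int i = 0; i < 2; i++)
			num_sizes += mip_levels[i];

		/* Wraps to 0, which the old "num_sizes > MAX" test accepts;
		 * the new per-face limit and num_sizes == 0 check catch both. */
		printf("num_sizes = %u\n", num_sizes);
		return 0;
	}
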
@@ -891,17 +894,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 	uint32_t handle;
 	struct ttm_base_object *base;
 	int ret;
+	bool require_exist = false;
 
 	if (handle_type == DRM_VMW_HANDLE_PRIME) {
 		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
 		if (unlikely(ret != 0))
 			return ret;
 	} else {
-		if (unlikely(drm_is_render_client(file_priv))) {
-			DRM_ERROR("Render client refused legacy "
-				  "surface reference.\n");
-			return -EACCES;
-		}
+		if (unlikely(drm_is_render_client(file_priv)))
+			require_exist = true;
+
 		if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
 			DRM_ERROR("Locked master refused legacy "
 				  "surface reference.\n");
@@ -929,17 +931,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 
 		/*
 		 * Make sure the surface creator has the same
-		 * authenticating master.
+		 * authenticating master, or is already registered with us.
 		 */
 		if (drm_is_primary_client(file_priv) &&
-		    user_srf->master != file_priv->master) {
-			DRM_ERROR("Trying to reference surface outside of"
-				  " master domain.\n");
-			ret = -EACCES;
-			goto out_bad_resource;
-		}
+		    user_srf->master != file_priv->master)
+			require_exist = true;
 
-		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+					 require_exist);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Could not add a reference to a surface.\n");
 			goto out_bad_resource;
@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  * @ref_type: The type of reference.
  * @existed: Upon completion, indicates that an identical reference object
  * already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
  *
  * Checks that the base object is shareable and adds a ref object to it.
  *
@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  */
 extern int ttm_ref_object_add(struct ttm_object_file *tfile,
 			      struct ttm_base_object *base,
-			      enum ttm_ref_type ref_type, bool *existed);
+			      enum ttm_ref_type ref_type, bool *existed,
+			      bool require_existed);
 extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
 				  struct ttm_base_object *base);