Commit 90643a24 authored by Rob Clark

drm/msm: ratelimit GEM related WARN_ON()s

If you mess something up, you don't really need to see the same warn on
splat 4000 times pumped out of a slow debug UART port.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Link: https://lore.kernel.org/r/20210405174532.1441497-2-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent a670ff57
...@@ -96,7 +96,7 @@ static struct page **get_pages(struct drm_gem_object *obj) ...@@ -96,7 +96,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
if (!msm_obj->pages) { if (!msm_obj->pages) {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
...@@ -180,7 +180,7 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj) ...@@ -180,7 +180,7 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
msm_gem_lock(obj); msm_gem_lock(obj);
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
msm_gem_unlock(obj); msm_gem_unlock(obj);
return ERR_PTR(-EBUSY); return ERR_PTR(-EBUSY);
} }
...@@ -256,7 +256,7 @@ static vm_fault_t msm_gem_fault(struct vm_fault *vmf) ...@@ -256,7 +256,7 @@ static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
goto out; goto out;
} }
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
msm_gem_unlock(obj); msm_gem_unlock(obj);
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
...@@ -289,7 +289,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj) ...@@ -289,7 +289,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
int ret; int ret;
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
/* Make it mmapable */ /* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj); ret = drm_gem_create_mmap_offset(obj);
...@@ -318,7 +318,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj, ...@@ -318,7 +318,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma; struct msm_gem_vma *vma;
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
vma = kzalloc(sizeof(*vma), GFP_KERNEL); vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma) if (!vma)
...@@ -337,7 +337,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, ...@@ -337,7 +337,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma; struct msm_gem_vma *vma;
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
list_for_each_entry(vma, &msm_obj->vmas, list) { list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace == aspace) if (vma->aspace == aspace)
...@@ -363,7 +363,7 @@ put_iova_spaces(struct drm_gem_object *obj) ...@@ -363,7 +363,7 @@ put_iova_spaces(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma; struct msm_gem_vma *vma;
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
list_for_each_entry(vma, &msm_obj->vmas, list) { list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace) { if (vma->aspace) {
...@@ -380,7 +380,7 @@ put_iova_vmas(struct drm_gem_object *obj) ...@@ -380,7 +380,7 @@ put_iova_vmas(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma, *tmp; struct msm_gem_vma *vma, *tmp;
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
del_vma(vma); del_vma(vma);
...@@ -394,7 +394,7 @@ static int get_iova_locked(struct drm_gem_object *obj, ...@@ -394,7 +394,7 @@ static int get_iova_locked(struct drm_gem_object *obj,
struct msm_gem_vma *vma; struct msm_gem_vma *vma;
int ret = 0; int ret = 0;
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
vma = lookup_vma(obj, aspace); vma = lookup_vma(obj, aspace);
...@@ -429,13 +429,13 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, ...@@ -429,13 +429,13 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
if (msm_obj->flags & MSM_BO_MAP_PRIV) if (msm_obj->flags & MSM_BO_MAP_PRIV)
prot |= IOMMU_PRIV; prot |= IOMMU_PRIV;
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
return -EBUSY; return -EBUSY;
vma = lookup_vma(obj, aspace); vma = lookup_vma(obj, aspace);
if (WARN_ON(!vma)) if (GEM_WARN_ON(!vma))
return -EINVAL; return -EINVAL;
pages = get_pages(obj); pages = get_pages(obj);
...@@ -453,7 +453,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj, ...@@ -453,7 +453,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
u64 local; u64 local;
int ret; int ret;
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
ret = get_iova_locked(obj, aspace, &local, ret = get_iova_locked(obj, aspace, &local,
range_start, range_end); range_start, range_end);
...@@ -524,7 +524,7 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj, ...@@ -524,7 +524,7 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
msm_gem_lock(obj); msm_gem_lock(obj);
vma = lookup_vma(obj, aspace); vma = lookup_vma(obj, aspace);
msm_gem_unlock(obj); msm_gem_unlock(obj);
WARN_ON(!vma); GEM_WARN_ON(!vma);
return vma ? vma->iova : 0; return vma ? vma->iova : 0;
} }
...@@ -537,11 +537,11 @@ void msm_gem_unpin_iova_locked(struct drm_gem_object *obj, ...@@ -537,11 +537,11 @@ void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
{ {
struct msm_gem_vma *vma; struct msm_gem_vma *vma;
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
vma = lookup_vma(obj, aspace); vma = lookup_vma(obj, aspace);
if (!WARN_ON(!vma)) if (!GEM_WARN_ON(!vma))
msm_gem_unmap_vma(aspace, vma); msm_gem_unmap_vma(aspace, vma);
} }
...@@ -593,12 +593,12 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) ...@@ -593,12 +593,12 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0; int ret = 0;
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
if (obj->import_attach) if (obj->import_attach)
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
if (WARN_ON(msm_obj->madv > madv)) { if (GEM_WARN_ON(msm_obj->madv > madv)) {
DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n", DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
msm_obj->madv, madv); msm_obj->madv, madv);
return ERR_PTR(-EBUSY); return ERR_PTR(-EBUSY);
...@@ -664,8 +664,8 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj) ...@@ -664,8 +664,8 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
WARN_ON(msm_obj->vmap_count < 1); GEM_WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--; msm_obj->vmap_count--;
} }
...@@ -707,8 +707,8 @@ void msm_gem_purge(struct drm_gem_object *obj) ...@@ -707,8 +707,8 @@ void msm_gem_purge(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!is_purgeable(msm_obj)); GEM_WARN_ON(!is_purgeable(msm_obj));
WARN_ON(obj->import_attach); GEM_WARN_ON(obj->import_attach);
put_iova_spaces(obj); put_iova_spaces(obj);
...@@ -739,9 +739,9 @@ void msm_gem_vunmap(struct drm_gem_object *obj) ...@@ -739,9 +739,9 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj))) if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
return; return;
vunmap(msm_obj->vaddr); vunmap(msm_obj->vaddr);
...@@ -789,9 +789,9 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu) ...@@ -789,9 +789,9 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
struct msm_drm_private *priv = obj->dev->dev_private; struct msm_drm_private *priv = obj->dev->dev_private;
might_sleep(); might_sleep();
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
WARN_ON(msm_obj->dontneed); GEM_WARN_ON(msm_obj->dontneed);
if (msm_obj->active_count++ == 0) { if (msm_obj->active_count++ == 0) {
mutex_lock(&priv->mm_lock); mutex_lock(&priv->mm_lock);
...@@ -806,7 +806,7 @@ void msm_gem_active_put(struct drm_gem_object *obj) ...@@ -806,7 +806,7 @@ void msm_gem_active_put(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
might_sleep(); might_sleep();
WARN_ON(!msm_gem_is_locked(obj)); GEM_WARN_ON(!msm_gem_is_locked(obj));
if (--msm_obj->active_count == 0) { if (--msm_obj->active_count == 0) {
update_inactive(msm_obj); update_inactive(msm_obj);
...@@ -818,7 +818,7 @@ static void update_inactive(struct msm_gem_object *msm_obj) ...@@ -818,7 +818,7 @@ static void update_inactive(struct msm_gem_object *msm_obj)
struct msm_drm_private *priv = msm_obj->base.dev->dev_private; struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
mutex_lock(&priv->mm_lock); mutex_lock(&priv->mm_lock);
WARN_ON(msm_obj->active_count != 0); GEM_WARN_ON(msm_obj->active_count != 0);
if (msm_obj->dontneed) if (msm_obj->dontneed)
mark_unpurgeable(msm_obj); mark_unpurgeable(msm_obj);
...@@ -830,7 +830,7 @@ static void update_inactive(struct msm_gem_object *msm_obj) ...@@ -830,7 +830,7 @@ static void update_inactive(struct msm_gem_object *msm_obj)
list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed); list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
mark_purgeable(msm_obj); mark_purgeable(msm_obj);
} else { } else {
WARN_ON(msm_obj->madv != __MSM_MADV_PURGED); GEM_WARN_ON(msm_obj->madv != __MSM_MADV_PURGED);
list_add_tail(&msm_obj->mm_list, &priv->inactive_purged); list_add_tail(&msm_obj->mm_list, &priv->inactive_purged);
} }
...@@ -1010,12 +1010,12 @@ void msm_gem_free_object(struct drm_gem_object *obj) ...@@ -1010,12 +1010,12 @@ void msm_gem_free_object(struct drm_gem_object *obj)
msm_gem_lock(obj); msm_gem_lock(obj);
/* object should not be on active list: */ /* object should not be on active list: */
WARN_ON(is_active(msm_obj)); GEM_WARN_ON(is_active(msm_obj));
put_iova_spaces(obj); put_iova_spaces(obj);
if (obj->import_attach) { if (obj->import_attach) {
WARN_ON(msm_obj->vaddr); GEM_WARN_ON(msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not /* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated: * ours, just free the array we allocated:
...@@ -1131,7 +1131,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, ...@@ -1131,7 +1131,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size) else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
use_vram = true; use_vram = true;
if (WARN_ON(use_vram && !priv->vram.size)) if (GEM_WARN_ON(use_vram && !priv->vram.size))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
/* Disallow zero sized objects as they make the underlying /* Disallow zero sized objects as they make the underlying
......
...@@ -11,6 +11,11 @@ ...@@ -11,6 +11,11 @@
#include <linux/dma-resv.h> #include <linux/dma-resv.h>
#include "msm_drv.h" #include "msm_drv.h"
/* Make all GEM related WARN_ON()s ratelimited.. when things go wrong they
 * tend to go wrong 1000s of times in a short timespan.
 *
 * NOTE(review): usable as a drop-in WARN_ON() replacement inside if()
 * conditions (as the hunks in this commit do), which assumes
 * WARN_RATELIMIT() evaluates to the truth value of its condition --
 * confirm against include/linux/ratelimit.h. __stringify(x) turns the
 * tested expression into the message text, preserving WARN_ON()-style
 * output.
 */
#define GEM_WARN_ON(x) WARN_RATELIMIT(x, "%s", __stringify(x))
/* Additional internal-use only BO flags: */ /* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */ #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */ #define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */
...@@ -203,7 +208,7 @@ msm_gem_is_locked(struct drm_gem_object *obj) ...@@ -203,7 +208,7 @@ msm_gem_is_locked(struct drm_gem_object *obj)
static inline bool is_active(struct msm_gem_object *msm_obj) static inline bool is_active(struct msm_gem_object *msm_obj)
{ {
WARN_ON(!msm_gem_is_locked(&msm_obj->base)); GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
return msm_obj->active_count; return msm_obj->active_count;
} }
...@@ -221,7 +226,7 @@ static inline bool is_purgeable(struct msm_gem_object *msm_obj) ...@@ -221,7 +226,7 @@ static inline bool is_purgeable(struct msm_gem_object *msm_obj)
static inline bool is_vunmapable(struct msm_gem_object *msm_obj) static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{ {
WARN_ON(!msm_gem_is_locked(&msm_obj->base)); GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
return (msm_obj->vmap_count == 0) && msm_obj->vaddr; return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
} }
...@@ -229,12 +234,12 @@ static inline void mark_purgeable(struct msm_gem_object *msm_obj) ...@@ -229,12 +234,12 @@ static inline void mark_purgeable(struct msm_gem_object *msm_obj)
{ {
struct msm_drm_private *priv = msm_obj->base.dev->dev_private; struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock)); GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (is_unpurgeable(msm_obj)) if (is_unpurgeable(msm_obj))
return; return;
if (WARN_ON(msm_obj->dontneed)) if (GEM_WARN_ON(msm_obj->dontneed))
return; return;
priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT; priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
...@@ -245,16 +250,16 @@ static inline void mark_unpurgeable(struct msm_gem_object *msm_obj) ...@@ -245,16 +250,16 @@ static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
{ {
struct msm_drm_private *priv = msm_obj->base.dev->dev_private; struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock)); GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (is_unpurgeable(msm_obj)) if (is_unpurgeable(msm_obj))
return; return;
if (WARN_ON(!msm_obj->dontneed)) if (GEM_WARN_ON(!msm_obj->dontneed))
return; return;
priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT; priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
WARN_ON(priv->shrinkable_count < 0); GEM_WARN_ON(priv->shrinkable_count < 0);
msm_obj->dontneed = false; msm_obj->dontneed = false;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment